diff --git a/glide.lock b/glide.lock index 6ca8d3d836c4..b60346549085 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: fdb248c68c494301c2b236fe16e13d5dd13c98590244b837f6ffff67c59d1942 -updated: 2019-04-02T12:40:58.982073501-04:00 +hash: c19b0353e3baeabcaea05bf4bdff602f9424cc40532eb32bd5caff7c8385f498 +updated: 2019-04-24T01:23:22.917178542-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -320,7 +320,7 @@ imports: - name: github.com/coreos/go-oidc version: 065b426bd41667456c1a924468f507673629c46b - name: github.com/coreos/go-semver - version: 568e959cd89871e61434c1143528d9162da89ef2 + version: e214231b295a8ea9479f11b70b35d5acf3556d9b subpackages: - semver - name: github.com/coreos/go-systemd @@ -346,7 +346,7 @@ imports: - name: github.com/d2g/dhcp4client version: 6e570ed0a266b730a860ba1068090f683b2c213a - name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 + version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d subpackages: - spew - name: github.com/daviddengcn/go-colortext @@ -432,7 +432,7 @@ imports: subpackages: - spdy - name: github.com/elazarl/go-bindata-assetfs - version: 3dcc96556217539f50599357fb481ac0dc7439b9 + version: 38087fe4dafb822e541b3f7955075cc1c30bd294 - name: github.com/elazarl/goproxy version: c4fc26588b6ef8af07a191fcb6476387bdd46711 - name: github.com/emicklei/go-restful @@ -440,7 +440,7 @@ imports: subpackages: - log - name: github.com/emicklei/go-restful-swagger12 - version: dcef7f55730566d41eae5db10e7d6981829720f6 + version: 7524189396c68dc4b04d53852f9edc00f816b123 - name: github.com/euank/go-kmsg-parser version: 5ba4d492e455a77d25dcf0d2c4acc9f2afebef4e subpackages: @@ -481,7 +481,7 @@ imports: - name: github.com/go-openapi/runtime version: 231d7876b7019dbcbfc97a7ba764379497b67c1d - name: github.com/go-openapi/spec - version: 5bae59e25b21498baea7f9d46e9c147ec106a42e + version: 9d9763c0ce87242cb4611b52b8601987f01eb4ed - name: 
github.com/go-openapi/strfmt version: 35fe47352985e13cc75f13120d70d26fd764ed51 - name: github.com/go-openapi/swag @@ -619,7 +619,7 @@ imports: - x509 - x509/pkix - name: github.com/google/gofuzz - version: 44d81051d367757e1c7c6a5a86423ece9afcf63c + version: f140a6486e521aad38f5917de355cbf147cc0496 - name: github.com/google/uuid version: 8c31c18f31ede9fc8eae72290a7e7a8064e9b3e3 - name: github.com/googleapis/gnostic @@ -898,7 +898,7 @@ imports: - go-selinux - go-selinux/label - name: github.com/openshift/api - version: d2f01e7b77a6fc78b328db20285423838419fef7 + version: f969221e02d73f8ecacf32ce24c12872ff8e0819 subpackages: - apps - apps/v1 @@ -944,7 +944,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: 7cc0953bbbb7925e30232c056606d666942bb542 + version: 0255926f53935175fe90b8e7672c4c06c17d79e6 subpackages: - apps/clientset/versioned - apps/clientset/versioned/fake @@ -1014,6 +1014,9 @@ imports: - oauth/informers/externalversions/oauth - oauth/informers/externalversions/oauth/v1 - oauth/listers/oauth/v1 + - operator/clientset/versioned + - operator/clientset/versioned/scheme + - operator/clientset/versioned/typed/operator/v1 - project/clientset/versioned - project/clientset/versioned/fake - project/clientset/versioned/scheme @@ -1070,8 +1073,11 @@ imports: - user/informers/externalversions/user/v1 - user/listers/user/v1 - name: github.com/openshift/library-go - version: dab26bb3a8dc7fccde7227194af755bbff30ce5d + version: 04e2bf481415af9b2583c71f584f1075d0b9960c subpackages: + - pkg/assets + - pkg/assets/create + - pkg/config/client - pkg/config/configdefaults - pkg/config/helpers - pkg/config/leaderelection @@ -1101,7 +1107,7 @@ imports: - pkg/util/status - pkg/util/user - name: github.com/pborman/uuid - version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 + version: 8b1b92947f46224e3b97bb1a3a5b0382be00d31e - name: github.com/pelletier/go-toml version: c01d1270ff3e442a8a57cddc1c92dc1138598194 - name: 
github.com/peterbourgon/diskv @@ -1495,11 +1501,11 @@ imports: - token - types - name: gopkg.in/inf.v0 - version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 + version: d2d2541c53f18d2a059457998ce2876cc8e67cbf - name: gopkg.in/ldap.v2 version: bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9 - name: gopkg.in/natefinch/lumberjack.v2 - version: 20b71e5b60d756d3d2f80def009790325acc2b23 + version: a96e63847dc3c67d17befa69c303767e2f84e54f - name: gopkg.in/square/go-jose.v2 version: 89060dee6a84df9a4dae49f676f0c755037834f1 subpackages: @@ -1511,7 +1517,7 @@ imports: - name: gopkg.in/yaml.v2 version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api - version: 5cb15d34447165a97c76ed5a60e4e99c8a01ecfe + version: 91a629a47dd4b8816f5d974f41ae0335b6468ec1 repo: https://github.com/openshift/kubernetes-api.git subpackages: - admission/v1beta1 @@ -1549,7 +1555,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 126ddf8324125004347d1241ac4a862e966f038f + version: 3c74db8dd172051b029f91536c681a1b43694809 repo: https://github.com/openshift/kubernetes-apiextensions-apiserver.git subpackages: - pkg/apis/apiextensions @@ -1579,16 +1585,18 @@ imports: - pkg/cmd/server/testing - pkg/controller/establish - pkg/controller/finalizer + - pkg/controller/openapi - pkg/controller/status - pkg/crdserverscheme - pkg/features + - pkg/generated/openapi - pkg/registry/customresource - pkg/registry/customresource/tableconvertor - pkg/registry/customresourcedefinition - test/integration - test/integration/fixtures - name: k8s.io/apimachinery - version: bfd9d19397c4e19add0aff1d5e6455e1e0d2df6d + version: f9b69888dcd7c45ba7b718862a9d21684a991aac repo: https://github.com/openshift/kubernetes-apimachinery.git subpackages: - pkg/api/apitesting @@ -1659,7 +1667,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 1262186583d011799ea06acd41b743c4c47015d6 + version: 
0064af82c693aad900dfffb117f91d31ac88c357 repo: https://github.com/openshift/kubernetes-apiserver.git subpackages: - pkg/admission @@ -1786,7 +1794,7 @@ imports: - plugin/pkg/authenticator/token/webhook - plugin/pkg/authorizer/webhook - name: k8s.io/cli-runtime - version: 3f781f603aa23cc80c65b62942581044f71fe343 + version: a4a70814eb00a17b29dd59a8816444650687cd4c repo: https://github.com/openshift/kubernetes-cli-runtime.git subpackages: - pkg/genericclioptions @@ -1794,7 +1802,7 @@ imports: - pkg/genericclioptions/printers - pkg/genericclioptions/resource - name: k8s.io/client-go - version: 8fceaa31a56e2673188c3a2f06262247dd96e614 + version: e7e9d19af44cca1c3aacd92448ed662ce65d5312 repo: https://github.com/openshift/kubernetes-client-go.git subpackages: - discovery @@ -1999,17 +2007,17 @@ imports: - util/testing - util/workqueue - name: k8s.io/cloud-provider - version: 9c9d72d1bf90eb62005f5112f3eea019b272c44b + version: 029ecc113e6d819f75bde3705b0ff3fafb52d397 - name: k8s.io/cluster-bootstrap - version: 0fa624df11e9dda15d6ba720bfc2902cd94ba453 + version: 9f4676fa25619bddda9a767ee734a3478ecf34c3 subpackages: - token/api - token/util - name: k8s.io/code-generator - version: 2bf47b06188b89cc41d68422aa52509f2cbb4cc8 + version: b7d0f818cc1e1fc12b9acc529a1c27835609cdf4 repo: https://github.com/openshift/kubernetes-code-generator.git - name: k8s.io/csi-api - version: 64f278f73b4802ace6e14e515b483d50ae68ab2a + version: 9d860736995cc6f848fece5c5b2abcd1a695a7bd repo: https://github.com/openshift/kubernetes-csi-api.git subpackages: - pkg/apis/csi/v1alpha1 @@ -2030,7 +2038,7 @@ imports: - name: k8s.io/klog version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f - name: k8s.io/kube-aggregator - version: ae5524b65c1081a5696a26f6b233db26eb1065f4 + version: d054841b60b8d23b66472fbd8fe78bf512aee522 repo: https://github.com/openshift/kubernetes-kube-aggregator.git subpackages: - pkg/apis/apiregistration @@ -2055,12 +2063,13 @@ imports: - pkg/controllers - pkg/controllers/autoregister - 
pkg/controllers/openapi + - pkg/controllers/openapi/aggregator - pkg/controllers/status - pkg/registry/apiservice - pkg/registry/apiservice/etcd - pkg/registry/apiservice/rest - name: k8s.io/kube-controller-manager - version: 2d4c0aa6bcf313838e331b5d87ba14fe0694d9a7 + version: 3f43d9caee53ceb3afbd9f58c2b3a1ff5b27b82d repo: https://github.com/openshift/kubernetes-kube-controller-manager.git subpackages: - config/v1alpha1 @@ -2077,20 +2086,20 @@ imports: - pkg/util/proto/testing - pkg/util/proto/validation - name: k8s.io/kube-proxy - version: 93400f02525d2590ef3b763c2b2a211cbe7788fc + version: 9a78b562076bf2bd5369b835543eeaee2cbfce76 repo: https://github.com/openshift/kubernetes-kube-proxy.git subpackages: - config/v1alpha1 - name: k8s.io/kube-scheduler - version: 284499a248131c34f30a90354139743e61e8e923 + version: b912abf0dded52946a76d048567df62f0334f0cb repo: https://github.com/openshift/kubernetes-kube-scheduler.git - name: k8s.io/kubelet - version: b2baec5a487a3ad344d60bd012d1c9464f517011 + version: e283b92471f6e196920a6a95ceac4d54d058b8b1 repo: https://github.com/openshift/kubernetes-kubelet.git subpackages: - config/v1beta1 - name: k8s.io/kubernetes - version: 00ef801c64ccdebd08d44d186d4d484a8fce3a1b + version: 8735f4df8055ba913ca37deedad27cbc3e576400 repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/cloud-controller-manager/app/apis/config @@ -2913,7 +2922,7 @@ imports: - third_party/forked/gonum/graph/simple - third_party/forked/gonum/graph/traverse - name: k8s.io/metrics - version: 34472d076c304d1dc65b4dc115aebf99f202d06e + version: 093b5bb370eafdd1f3c88e6a6ae6675186a39809 repo: https://github.com/openshift/kubernetes-metrics.git subpackages: - pkg/apis/custom_metrics @@ -2932,16 +2941,16 @@ imports: - pkg/client/custom_metrics/scheme - pkg/client/external_metrics - name: k8s.io/sample-apiserver - version: 7167784fdc46a7f5aaaf88b9aa1acb8cf14ae5ba + version: 8023f632fb5fc9424c2bd5f69282d1549029ffcd repo: 
https://github.com/openshift/kubernetes-sample-apiserver.git subpackages: - pkg/apis/wardle - pkg/apis/wardle/v1alpha1 - name: k8s.io/sample-cli-plugin - version: 3e274ef7dda882e51dde11fae3660e22373119f7 + version: 9a5b06ebe01fd257c8f66745b06182e91d8ba3cc repo: https://github.com/openshift/kubernetes-sample-cli-plugin.git - name: k8s.io/sample-controller - version: 518f432aa760d8a9b7b6810057fcc50e787a8798 + version: 867a7679da98dcfaedecc8f486d90ee324eb4b5e repo: https://github.com/openshift/kubernetes-sample-controller.git - name: k8s.io/utils version: 66066c83e385e385ccc3c964b44fd7dcd413d0ed @@ -2951,7 +2960,7 @@ imports: - exec/testing - pointer - name: sigs.k8s.io/yaml - version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 + version: 199c9c29c4e4f08dc72163605467ab691a004022 - name: vbom.ml/util version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394 subpackages: diff --git a/glide.yaml b/glide.yaml index 6be99994011f..978385ab54a7 100644 --- a/glide.yaml +++ b/glide.yaml @@ -66,6 +66,8 @@ import: - package: k8s.io/kube-openapi repo: https://github.com/openshift/kube-openapi.git version: origin-4.0-kubernetes-master-d7c86cd # bumped to match k8s version we've had and then additionally with changes for openapi CRD (we should pick that back around k8s 1.14) +- package: k8s.io/klog + version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f # recent klog bump broke glog flags test in k8s.io/apiserver, pin - package: github.com/coreos/etcd version: v3.3.10 - package: google.golang.org/grpc diff --git a/pkg/monitor/operator.go b/pkg/monitor/operator.go index a5d533459f16..2a5a4d7d3c5b 100644 --- a/pkg/monitor/operator.go +++ b/pkg/monitor/operator.go @@ -50,7 +50,10 @@ func startClusterOperatorMonitoring(ctx context.Context, m Recorder, client conf msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status) } level := Warning - if s.Type == configv1.OperatorFailing && s.Status == configv1.ConditionTrue { + if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue { + 
level = Error + } + if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue { level = Error } conditions = append(conditions, Condition{ @@ -188,7 +191,10 @@ func startClusterOperatorMonitoring(ctx context.Context, m Recorder, client conf msg = fmt.Sprintf("changed %s to %s", s.Type, s.Status) } level := Warning - if s.Type == configv1.OperatorFailing && s.Status == configv1.ConditionTrue { + if s.Type == configv1.OperatorDegraded && s.Status == configv1.ConditionTrue { + level = Error + } + if s.Type == configv1.ClusterStatusConditionType("Failing") && s.Status == configv1.ConditionTrue { level = Error } conditions = append(conditions, Condition{ diff --git a/pkg/oc/cli/admin/upgrade/upgrade.go b/pkg/oc/cli/admin/upgrade/upgrade.go index a7e0e826556e..fc54514f3b7e 100644 --- a/pkg/oc/cli/admin/upgrade/upgrade.go +++ b/pkg/oc/cli/admin/upgrade/upgrade.go @@ -240,7 +240,7 @@ func (o *Options) Run() error { return nil default: - if c := findCondition(cv.Status.Conditions, configv1.OperatorFailing); c != nil && c.Status == configv1.ConditionTrue { + if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil && c.Status == configv1.ConditionTrue { prefix := "No upgrade is possible due to an error" if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c != nil && c.Status == configv1.ConditionTrue && len(c.Message) > 0 { prefix = c.Message @@ -380,7 +380,7 @@ func checkForUpgrade(cv *configv1.ClusterVersion) error { if c := findCondition(cv.Status.Conditions, "Invalid"); c != nil && c.Status == configv1.ConditionTrue { return fmt.Errorf("The cluster version object is invalid, you must correct the invalid state first.\n\n Reason: %s\n Message: %s\n\n", c.Reason, c.Message) } - if c := findCondition(cv.Status.Conditions, configv1.OperatorFailing); c != nil && c.Status == configv1.ConditionTrue { + if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil && c.Status 
== configv1.ConditionTrue { return fmt.Errorf("The cluster is experiencing an upgrade-blocking error, use --force to upgrade anyway.\n\n Reason: %s\n Message: %s\n\n", c.Reason, c.Message) } if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c != nil && c.Status == configv1.ConditionTrue { diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 62230af15f5e..e640eaea3f98 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -476,13 +476,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/operator/v1.OpenShiftControllerManagerSpec": schema_openshift_api_operator_v1_OpenShiftControllerManagerSpec(ref), "github.com/openshift/api/operator/v1.OpenShiftControllerManagerStatus": schema_openshift_api_operator_v1_OpenShiftControllerManagerStatus(ref), "github.com/openshift/api/operator/v1.OpenShiftSDNConfig": schema_openshift_api_operator_v1_OpenShiftSDNConfig(ref), - "github.com/openshift/api/operator/v1.OperandContainerSpec": schema_openshift_api_operator_v1_OperandContainerSpec(ref), - "github.com/openshift/api/operator/v1.OperandSpec": schema_openshift_api_operator_v1_OperandSpec(ref), "github.com/openshift/api/operator/v1.OperatorCondition": schema_openshift_api_operator_v1_OperatorCondition(ref), "github.com/openshift/api/operator/v1.OperatorSpec": schema_openshift_api_operator_v1_OperatorSpec(ref), "github.com/openshift/api/operator/v1.OperatorStatus": schema_openshift_api_operator_v1_OperatorStatus(ref), "github.com/openshift/api/operator/v1.ProxyConfig": schema_openshift_api_operator_v1_ProxyConfig(ref), - "github.com/openshift/api/operator/v1.ResourcePatch": schema_openshift_api_operator_v1_ResourcePatch(ref), "github.com/openshift/api/operator/v1.ServiceCA": schema_openshift_api_operator_v1_ServiceCA(ref), "github.com/openshift/api/operator/v1.ServiceCAList": 
schema_openshift_api_operator_v1_ServiceCAList(ref), "github.com/openshift/api/operator/v1.ServiceCASpec": schema_openshift_api_operator_v1_ServiceCASpec(ref), @@ -8237,7 +8234,7 @@ func schema_openshift_api_config_v1_ClusterVersionStatus(ref common.ReferenceCal }, "conditions": { SchemaProps: spec.SchemaProps{ - Description: "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Failing\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + Description: "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -9878,7 +9875,7 @@ func schema_openshift_api_config_v1_InfrastructureStatus(ref common.ReferenceCal }, "platform": { SchemaProps: spec.SchemaProps{ - Description: "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + Description: "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", Type: []string{"string"}, Format: "", }, @@ -11824,6 +11821,13 @@ func schema_openshift_api_config_v1_Update(ref common.ReferenceCallback) common. Format: "", }, }, + "force": { + SchemaProps: spec.SchemaProps{ + Description: "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, @@ -11870,8 +11874,15 @@ func schema_openshift_api_config_v1_UpdateHistory(ref common.ReferenceCallback) Format: "", }, }, + "verified": { + SchemaProps: spec.SchemaProps{ + Description: "verified indicates whether the provided update was properly verified before it was installed. 
If this is false the cluster may not be trusted.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, - Required: []string{"state", "startedTime", "completionTime", "image"}, + Required: []string{"state", "startedTime", "completionTime", "image", "verified"}, }, }, Dependencies: []string{ @@ -21198,17 +21209,11 @@ func schema_openshift_api_operator_v1_AuthenticationSpec(ref common.ReferenceCal Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -21228,7 +21233,7 @@ func schema_openshift_api_operator_v1_AuthenticationSpec(ref common.ReferenceCal }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -21455,17 +21460,11 @@ func schema_openshift_api_operator_v1_ConsoleSpec(ref common.ReferenceCallback) Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -21491,7 +21490,7 @@ func schema_openshift_api_operator_v1_ConsoleSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.ConsoleCustomization", "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "github.com/openshift/api/operator/v1.ConsoleCustomization", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -21880,17 +21879,11 @@ func schema_openshift_api_operator_v1_EtcdSpec(ref common.ReferenceCallback) com Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -21905,6 +21898,13 @@ func schema_openshift_api_operator_v1_EtcdSpec(ref common.ReferenceCallback) com Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, + "forceRedeploymentReason": { + SchemaProps: spec.SchemaProps{ + Description: "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + Type: []string{"string"}, + Format: "", + }, + }, "failedRevisionLimit": { SchemaProps: spec.SchemaProps{ Description: "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", @@ -21919,19 +21919,12 @@ func schema_openshift_api_operator_v1_EtcdSpec(ref common.ReferenceCallback) com Format: "int32", }, }, - "forceRedeploymentReason": { - SchemaProps: spec.SchemaProps{ - Description: "forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"managementState", "forceRedeploymentReason"}, }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -22395,17 +22388,11 @@ func schema_openshift_api_operator_v1_KubeAPIServerSpec(ref common.ReferenceCall Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -22420,6 +22407,13 @@ func schema_openshift_api_operator_v1_KubeAPIServerSpec(ref common.ReferenceCall Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, + "forceRedeploymentReason": { + SchemaProps: spec.SchemaProps{ + Description: "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + Type: []string{"string"}, + Format: "", + }, + }, "failedRevisionLimit": { SchemaProps: spec.SchemaProps{ Description: "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", @@ -22434,19 +22428,12 @@ func schema_openshift_api_operator_v1_KubeAPIServerSpec(ref common.ReferenceCall Format: "int32", }, }, - "forceRedeploymentReason": { - SchemaProps: spec.SchemaProps{ - Description: "forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"managementState", "forceRedeploymentReason"}, }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -22646,17 +22633,11 @@ func schema_openshift_api_operator_v1_KubeControllerManagerSpec(ref common.Refer Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -22671,6 +22652,13 @@ func schema_openshift_api_operator_v1_KubeControllerManagerSpec(ref common.Refer Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, + "forceRedeploymentReason": { + SchemaProps: spec.SchemaProps{ + Description: "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + Type: []string{"string"}, + Format: "", + }, + }, "failedRevisionLimit": { SchemaProps: spec.SchemaProps{ Description: "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", @@ -22685,19 +22673,12 @@ func schema_openshift_api_operator_v1_KubeControllerManagerSpec(ref common.Refer Format: "int32", }, }, - "forceRedeploymentReason": { - SchemaProps: spec.SchemaProps{ - Description: "forceRedeploymentReason can be used to force the redeployment of the kube-controller-manager by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"managementState", "forceRedeploymentReason"}, }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -22897,17 +22878,11 @@ func schema_openshift_api_operator_v1_KubeSchedulerSpec(ref common.ReferenceCall Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -22922,6 +22897,13 @@ func schema_openshift_api_operator_v1_KubeSchedulerSpec(ref common.ReferenceCall Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, + "forceRedeploymentReason": { + SchemaProps: spec.SchemaProps{ + Description: "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + Type: []string{"string"}, + Format: "", + }, + }, "failedRevisionLimit": { SchemaProps: spec.SchemaProps{ Description: "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", @@ -22936,19 +22918,12 @@ func schema_openshift_api_operator_v1_KubeSchedulerSpec(ref common.ReferenceCall Format: "int32", }, }, - "forceRedeploymentReason": { - SchemaProps: spec.SchemaProps{ - Description: "forceRedeploymentReason can be used to force the redeployment of the kube-scheduler by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"managementState", "forceRedeploymentReason"}, }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -23099,17 +23074,11 @@ func schema_openshift_api_operator_v1_MyOperatorResourceSpec(ref common.Referenc Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -23129,7 +23098,7 @@ func schema_openshift_api_operator_v1_MyOperatorResourceSpec(ref common.Referenc }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -23392,11 +23361,24 @@ func schema_openshift_api_operator_v1_NodePlacement(ref common.ReferenceCallback Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } @@ -23590,17 +23572,11 @@ func schema_openshift_api_operator_v1_OpenShiftAPIServerSpec(ref common.Referenc Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -23620,7 +23596,7 @@ func schema_openshift_api_operator_v1_OpenShiftAPIServerSpec(ref common.Referenc }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -23800,17 +23776,11 @@ func schema_openshift_api_operator_v1_OpenShiftControllerManagerSpec(ref common. Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -23830,7 +23800,7 @@ func schema_openshift_api_operator_v1_OpenShiftControllerManagerSpec(ref common. 
}, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -23938,83 +23908,6 @@ func schema_openshift_api_operator_v1_OpenShiftSDNConfig(ref common.ReferenceCal } } -func schema_openshift_api_operator_v1_OperandContainerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name is the name of the container to modify", - Type: []string{"string"}, - Format: "", - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "resources are the requests and limits to place in the container. Nil means to accept the defaults.", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ResourceRequirements"}, - } -} - -func schema_openshift_api_operator_v1_OperandSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OperandSpec holds information for customization of a particular functional unit - logically maps to a workload", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name is the name of this unit. 
The operator must be aware of it.", - Type: []string{"string"}, - Format: "", - }, - }, - "operandContainerSpecs": { - SchemaProps: spec.SchemaProps{ - Description: "operandContainerSpecs are per-container options", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandContainerSpec"), - }, - }, - }, - }, - }, - "unsupportedResourcePatches": { - SchemaProps: spec.SchemaProps{ - Description: "unsupportedResourcePatches are applied to the workload resource for this unit. This is an unsupported workaround if anything needs to be modified on the workload that is not otherwise configurable.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.ResourcePatch"), - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandContainerSpec", "github.com/openshift/api/operator/v1.ResourcePatch"}, - } -} - func schema_openshift_api_operator_v1_OperatorCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -24081,17 +23974,11 @@ func schema_openshift_api_operator_v1_OperatorSpec(ref common.ReferenceCallback) Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -24111,7 +23998,7 @@ func schema_openshift_api_operator_v1_OperatorSpec(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -24226,34 +24113,6 @@ func schema_openshift_api_operator_v1_ProxyConfig(ref common.ReferenceCallback) } } -func schema_openshift_api_operator_v1_ResourcePatch(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourcePatch is a way to represent the patch you would issue to `kubectl patch` in the API", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "type is the type of patch to apply: jsonmerge, strategicmerge", - Type: []string{"string"}, - Format: "", - }, - }, - "patch": { - SchemaProps: spec.SchemaProps{ - Description: "patch the patch itself", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "patch"}, - }, - }, - } -} - func schema_openshift_api_operator_v1_ServiceCA(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -24282,12 +24141,14 @@ func schema_openshift_api_operator_v1_ServiceCA(ref common.ReferenceCallback) co }, "spec": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.ServiceCASpec"), + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/operator/v1.ServiceCASpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Ref: 
ref("github.com/openshift/api/operator/v1.ServiceCAStatus"), + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/operator/v1.ServiceCAStatus"), }, }, }, @@ -24368,17 +24229,11 @@ func schema_openshift_api_operator_v1_ServiceCASpec(ref common.ReferenceCallback Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -24398,7 +24253,7 @@ func schema_openshift_api_operator_v1_ServiceCASpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -24578,17 +24433,11 @@ func schema_openshift_api_operator_v1_ServiceCatalogAPIServerSpec(ref common.Ref Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -24608,7 +24457,7 @@ func schema_openshift_api_operator_v1_ServiceCatalogAPIServerSpec(ref common.Ref }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -24788,17 +24637,11 @@ func schema_openshift_api_operator_v1_ServiceCatalogControllerManagerSpec(ref co Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -24818,7 +24661,7 @@ func schema_openshift_api_operator_v1_ServiceCatalogControllerManagerSpec(ref co }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -24905,17 +24748,11 @@ func schema_openshift_api_operator_v1_StaticPodOperatorSpec(ref common.Reference Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -24930,6 +24767,13 @@ func schema_openshift_api_operator_v1_StaticPodOperatorSpec(ref common.Reference Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, + "forceRedeploymentReason": { + SchemaProps: spec.SchemaProps{ + Description: "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + Type: []string{"string"}, + Format: "", + }, + }, "failedRevisionLimit": { SchemaProps: spec.SchemaProps{ Description: "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", @@ -24945,11 +24789,11 @@ func schema_openshift_api_operator_v1_StaticPodOperatorSpec(ref common.Reference }, }, }, - Required: []string{"managementState"}, + Required: []string{"managementState", "forceRedeploymentReason"}, }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -27542,9 +27386,16 @@ func schema_openshift_api_route_v1_RouteSpec(ref common.ReferenceCallback) commo Format: "", }, }, + "subdomain": { + SchemaProps: spec.SchemaProps{ + Description: "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. 
This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + Type: []string{"string"}, + Format: "", + }, + }, "path": { SchemaProps: spec.SchemaProps{ - Description: "Path that the router watches for, to route traffic for to the service. Optional", + Description: "path that the router watches for, to route traffic for to the service. Optional", Type: []string{"string"}, Format: "", }, @@ -28916,17 +28767,11 @@ func schema_openshift_api_servicecertsigner_v1alpha1_ServiceCertSignerOperatorCo Format: "", }, }, - "operandSpecs": { + "operatorLogLevel": { SchemaProps: spec.SchemaProps{ - Description: "operandSpecs provide customization for functional units within the component", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/operator/v1.OperandSpec"), - }, - }, - }, + Description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", + Type: []string{"string"}, + Format: "", }, }, "unsupportedConfigOverrides": { @@ -28964,7 +28809,7 @@ func schema_openshift_api_servicecertsigner_v1alpha1_ServiceCertSignerOperatorCo }, }, Dependencies: []string{ - "github.com/openshift/api/operator/v1.OperandSpec", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } diff --git a/pkg/route/apis/route/types.go b/pkg/route/apis/route/types.go index 8772a8d873c5..9ff70b34fafc 100644 --- a/pkg/route/apis/route/types.go +++ b/pkg/route/apis/route/types.go @@ -25,6 +25,21 @@ type RouteSpec struct { // Host is an alias/DNS that points to the service. 
Optional // Must follow DNS952 subdomain conventions. Host string + + // Subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + Subdomain string + // Path that the router watches for, to route traffic for to the service. 
Optional Path string diff --git a/pkg/route/apis/route/v1/zz_generated.conversion.go b/pkg/route/apis/route/v1/zz_generated.conversion.go index 80e965c89940..7ff742e89cf9 100644 --- a/pkg/route/apis/route/v1/zz_generated.conversion.go +++ b/pkg/route/apis/route/v1/zz_generated.conversion.go @@ -258,6 +258,7 @@ func Convert_route_RoutePort_To_v1_RoutePort(in *route.RoutePort, out *v1.RouteP func autoConvert_v1_RouteSpec_To_route_RouteSpec(in *v1.RouteSpec, out *route.RouteSpec, s conversion.Scope) error { out.Host = in.Host + out.Subdomain = in.Subdomain out.Path = in.Path if err := Convert_v1_RouteTargetReference_To_route_RouteTargetReference(&in.To, &out.To, s); err != nil { return err @@ -276,6 +277,7 @@ func Convert_v1_RouteSpec_To_route_RouteSpec(in *v1.RouteSpec, out *route.RouteS func autoConvert_route_RouteSpec_To_v1_RouteSpec(in *route.RouteSpec, out *v1.RouteSpec, s conversion.Scope) error { out.Host = in.Host + out.Subdomain = in.Subdomain out.Path = in.Path if err := Convert_route_RouteTargetReference_To_v1_RouteTargetReference(&in.To, &out.To, s); err != nil { return err diff --git a/pkg/route/controller/ingress/ingress_test.go b/pkg/route/controller/ingress/ingress_test.go index 63d4ee85f51c..8305fa0a3252 100644 --- a/pkg/route/controller/ingress/ingress_test.go +++ b/pkg/route/controller/ingress/ingress_test.go @@ -761,7 +761,7 @@ func TestController_sync(t *testing.T) { wantPatches: []clientgotesting.PatchActionImpl{ { Name: "1-abcdef", - Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080}}}]`), + Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","subdomain":"","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080}}}]`), }, }, }, @@ -1025,7 +1025,7 @@ func TestController_sync(t *testing.T) { wantPatches: []clientgotesting.PatchActionImpl{ { Name: "1-abcdef", - Patch: 
[]byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080}}}]`), + Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","subdomain":"","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080}}}]`), }, }, }, @@ -1087,7 +1087,7 @@ func TestController_sync(t *testing.T) { wantPatches: []clientgotesting.PatchActionImpl{ { Name: "1-abcdef", - Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080},"tls":{"termination":"edge","certificate":"cert","key":"key","insecureEdgeTerminationPolicy":"Redirect"}}}]`), + Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","subdomain":"","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080},"tls":{"termination":"edge","certificate":"cert","key":"key","insecureEdgeTerminationPolicy":"Redirect"}}}]`), }, }, }, @@ -1154,7 +1154,7 @@ func TestController_sync(t *testing.T) { wantPatches: []clientgotesting.PatchActionImpl{ { Name: "1-abcdef", - Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080},"tls":{"termination":"edge","certificate":"cert","key":"key2"}}}]`), + Patch: []byte(`[{"op":"replace","path":"/spec","value":{"host":"test.com","subdomain":"","path":"/","to":{"kind":"","name":"service-1","weight":null},"port":{"targetPort":8080},"tls":{"termination":"edge","certificate":"cert","key":"key2"}}}]`), }, }, }, diff --git a/test/e2e/upgrade/upgrade.go b/test/e2e/upgrade/upgrade.go index 406564688204..10c84a31dd02 100644 --- a/test/e2e/upgrade/upgrade.go +++ b/test/e2e/upgrade/upgrade.go @@ -244,7 +244,10 @@ func getUpgradeContext(c configv1client.Interface, upgradeTarget, upgradeImage s return nil, fmt.Errorf("cluster is 
already being upgraded, cannot start a test: %s", versionString(*cv.Spec.DesiredUpdate)) } } - if c := findCondition(cv.Status.Conditions, configv1.OperatorFailing); c != nil && c.Status == configv1.ConditionTrue { + if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil && c.Status == configv1.ConditionTrue { + return nil, fmt.Errorf("cluster is reporting a degraded condition, cannot continue: %v", c.Message) + } + if c := findCondition(cv.Status.Conditions, configv1.ClusterStatusConditionType("Failing")); c != nil && c.Status == configv1.ConditionTrue { return nil, fmt.Errorf("cluster is reporting a failing condition, cannot continue: %v", c.Message) } if c := findCondition(cv.Status.Conditions, configv1.OperatorProgressing); c == nil || c.Status != configv1.ConditionFalse { @@ -357,7 +360,13 @@ func clusterUpgrade(c configv1client.Interface, version upgrades.VersionContext) } } - if c := findCondition(cv.Status.Conditions, configv1.OperatorFailing); c != nil { + if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil { + if c.Status == configv1.ConditionTrue { + framework.Logf("cluster upgrade is degraded: %v", c.Message) + } + } + + if c := findCondition(cv.Status.Conditions, configv1.ClusterStatusConditionType("Failing")); c != nil { if c.Status == configv1.ConditionTrue { framework.Logf("cluster upgrade is failing: %v", c.Message) } @@ -377,7 +386,12 @@ func clusterUpgrade(c configv1client.Interface, version upgrades.VersionContext) return false, fmt.Errorf("cluster version was Progressing=true after completion: %v", cv.Status.Conditions) } } - if c := findCondition(cv.Status.Conditions, configv1.OperatorFailing); c != nil { + if c := findCondition(cv.Status.Conditions, configv1.OperatorDegraded); c != nil { + if c.Status == configv1.ConditionTrue { + return false, fmt.Errorf("cluster version was Degraded=true after completion: %v", cv.Status.Conditions) + } + } + if c := findCondition(cv.Status.Conditions, 
configv1.ClusterStatusConditionType("Failing")); c != nil { if c.Status == configv1.ConditionTrue { return false, fmt.Errorf("cluster version was Failing=true after completion: %v", cv.Status.Conditions) } @@ -398,7 +412,7 @@ func clusterUpgrade(c configv1client.Interface, version upgrades.VersionContext) "%s\t%s %s %s\t%s\t%s\n", item.Name, findConditionShortStatus(item.Status.Conditions, configv1.OperatorAvailable, configv1.ConditionTrue), - findConditionShortStatus(item.Status.Conditions, configv1.OperatorFailing, configv1.ConditionFalse), + findConditionShortStatus(item.Status.Conditions, configv1.OperatorDegraded, configv1.ConditionFalse), findConditionShortStatus(item.Status.Conditions, configv1.OperatorProgressing, configv1.ConditionFalse), findVersion(item.Status.Versions, "operator", oldVersion, lastCV.Status.Desired.Version), findConditionMessage(item.Status.Conditions, configv1.OperatorProgressing), diff --git a/vendor/github.com/coreos/go-semver/.travis.yml b/vendor/github.com/coreos/go-semver/.travis.yml index fdd60c66e807..05f548c9ab66 100644 --- a/vendor/github.com/coreos/go-semver/.travis.yml +++ b/vendor/github.com/coreos/go-semver/.travis.yml @@ -1,5 +1,8 @@ language: go +sudo: false go: - - 1.1 + - 1.4 + - 1.5 + - 1.6 - tip script: cd semver && go test diff --git a/vendor/github.com/coreos/go-semver/DCO b/vendor/github.com/coreos/go-semver/DCO new file mode 100644 index 000000000000..716561d5d282 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 000000000000..23a0ada2fbb5 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-semver/README.md b/vendor/github.com/coreos/go-semver/README.md index 800063345020..5bc9263cfbba 100644 --- a/vendor/github.com/coreos/go-semver/README.md +++ b/vendor/github.com/coreos/go-semver/README.md @@ -1,6 +1,7 @@ # go-semver - Semantic Versioning Library -[![Build Status](https://travis-ci.org/coreos/go-semver.png)](https://travis-ci.org/coreos/go-semver) +[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver) +[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver) go-semver is a [semantic versioning][semver] library for Go. It lets you parse and compare two semantic version strings. @@ -9,9 +10,9 @@ and compare two semantic version strings. ## Usage -``` -vA, err := semver.NewVersion("1.2.3") -vB, err := semver.NewVersion("3.2.1") +```go +vA := semver.New("1.2.3") +vB := semver.New("3.2.1") fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB)) ``` @@ -25,7 +26,3 @@ $ go run example.go 1.2.3 3.2.1 $ go run example.go 5.2.3 3.2.1 5.2.3 < 3.2.1 == false ``` - -## TODO - -- Richer comparision operations diff --git a/vendor/github.com/coreos/go-semver/code-of-conduct.md b/vendor/github.com/coreos/go-semver/code-of-conduct.md new file mode 100644 index 000000000000..a234f3609d09 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/code-of-conduct.md @@ -0,0 +1,61 @@ +## CoreOS Community Code of Conduct + +### Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. 
+ +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing others' private information, such as physical or electronic addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently applying these +principles to every aspect of managing this project. Project maintainers who do +not follow or enforce the Code of Conduct may be permanently removed from the +project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer, Brandon Philips +, and/or Rithu John . + +This Code of Conduct is adapted from the Contributor Covenant +(http://contributor-covenant.org), version 1.2.0, available at +http://contributor-covenant.org/version/1/2/0/ + +### CoreOS Events Code of Conduct + +CoreOS events are working conferences intended for professional networking and +collaboration in the CoreOS community. Attendees are expected to behave +according to professional standards and in accordance with their employer’s +policies on appropriate workplace behavior. 
+ +While at CoreOS events or related social networking opportunities, attendees +should not engage in discriminatory or offensive speech or actions including +but not limited to gender, sexuality, race, age, disability, or religion. +Speakers should be especially aware of these concerns. + +CoreOS does not condone any statements by speakers contrary to these standards. +CoreOS reserves the right to deny entrance and/or eject from an event (without +refund) any individual found to be engaging in discriminatory or offensive +speech or actions. + +Please bring any concerns to the immediate attention of designated on-site +staff, Brandon Philips , and/or Rithu John . diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index f1f8ab797390..76cf4852c769 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -1,9 +1,25 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Semantic Versions http://semver.org package semver import ( "bytes" "errors" "fmt" + "regexp" "strconv" "strings" ) @@ -29,17 +45,45 @@ func splitOff(input *string, delim string) (val string) { return val } +func New(version string) *Version { + return Must(NewVersion(version)) +} + func NewVersion(version string) (*Version, error) { v := Version{} + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) dotParts := strings.SplitN(version, ".", 3) if len(dotParts) != 3 { - return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version)) + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) } - v.Metadata = splitOff(&dotParts[2], "+") - v.PreRelease = PreRelease(splitOff(&dotParts[2], "-")) + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } parsed := make([]int64, 3, 3) @@ -47,63 +91,83 @@ func NewVersion(version string) (*Version, error) { val, err := strconv.ParseInt(v, 10, 64) parsed[i] = val if err != nil { - return nil, err + return err } } + v.Metadata = metadata + v.PreRelease = preRelease v.Major = parsed[0] v.Minor = parsed[1] v.Patch = parsed[2] - - return &v, nil + return nil } -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - return v -} - -func (v *Version) String() string { +func (v Version) String() string { var buffer bytes.Buffer - base := fmt.Sprintf("%d.%d.%d", v.Major, 
v.Minor, v.Patch) - buffer.WriteString(base) + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) if v.PreRelease != "" { - buffer.WriteString(fmt.Sprintf("-%s", v.PreRelease)) + fmt.Fprintf(&buffer, "-%s", v.PreRelease) } if v.Metadata != "" { - buffer.WriteString(fmt.Sprintf("+%s", v.Metadata)) + fmt.Fprintf(&buffer, "+%s", v.Metadata) } return buffer.String() } -func (v *Version) LessThan(versionB Version) bool { - versionA := *v - cmp := recursiveCompare(versionA.Slice(), versionB.Slice()) +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} - if cmp == 0 { - cmp = preReleaseCompare(versionA, versionB) +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") } + return v.Set(string(data[1 : l-1])) +} - if cmp == -1 { - return true +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} - return false +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 } -/* Slice converts the comparable parts of the semver into a slice of strings */ -func (v *Version) Slice() []int64 { +// Slice converts the comparable parts of the semver into a slice of integers. 
+func (v Version) Slice() []int64 { return []int64{v.Major, v.Minor, v.Patch} } -func (p *PreRelease) Slice() []string { - preRelease := string(*p) +func (p PreRelease) Slice() []string { + preRelease := string(p) return strings.Split(preRelease, ".") } @@ -119,7 +183,7 @@ func preReleaseCompare(versionA Version, versionB Version) int { return -1 } - // If there is a prelease, check and compare each part. + // If there is a prerelease, check and compare each part. return recursivePreReleaseCompare(a.Slice(), b.Slice()) } @@ -141,9 +205,12 @@ func recursiveCompare(versionA []int64, versionB []int64) int { } func recursivePreReleaseCompare(versionA []string, versionB []string) int { - // Handle slice length disparity. + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. if len(versionA) == 0 { - // Nothing to compare too, so we return 0 + if len(versionB) > 0 { + return -1 + } return 0 } else if len(versionB) == 0 { // We're longer than versionB so return 1. @@ -153,7 +220,8 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int { a := versionA[0] b := versionB[0] - aInt := false; bInt := false + aInt := false + bInt := false aI, err := strconv.Atoi(versionA[0]) if err == nil { @@ -165,6 +233,13 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int { bInt = true } + // Numeric identifiers always have lower precedence than non-numeric identifiers. 
+ if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + // Handle Integer Comparison if aInt && bInt { if aI > bI { @@ -207,3 +282,15 @@ func (v *Version) BumpPatch() { v.PreRelease = PreRelease("") v.Metadata = "" } + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/semver_test.go b/vendor/github.com/coreos/go-semver/semver/semver_test.go index 9bfc3b8a9bed..3abcab2a56ba 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver_test.go +++ b/vendor/github.com/coreos/go-semver/semver/semver_test.go @@ -1,16 +1,36 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package semver import ( + "bytes" + "encoding/json" "errors" + "flag" + "fmt" "math/rand" "reflect" "testing" "time" + + "gopkg.in/yaml.v2" ) type fixture struct { - greaterVersion string - lesserVersion string + GreaterVersion string + LesserVersion string } var fixtures = []fixture{ @@ -52,23 +72,39 @@ var fixtures = []fixture{ fixture{"1.0.0-beta", "1.0.0-alpha.beta"}, fixture{"1.0.0-alpha.beta", "1.0.0-alpha.1"}, fixture{"1.0.0-alpha.1", "1.0.0-alpha"}, + fixture{"1.2.3-rc.1-1-1hash", "1.2.3-rc.2"}, } func TestCompare(t *testing.T) { for _, v := range fixtures { - gt, err := NewVersion(v.greaterVersion) + gt, err := NewVersion(v.GreaterVersion) if err != nil { t.Error(err) } - lt, err := NewVersion(v.lesserVersion) + lt, err := NewVersion(v.LesserVersion) if err != nil { t.Error(err) } - if gt.LessThan(*lt) == true { + if gt.LessThan(*lt) { t.Errorf("%s should not be less than %s", gt, lt) } + if gt.Equal(*lt) { + t.Errorf("%s should not be equal to %s", gt, lt) + } + if gt.Compare(*lt) <= 0 { + t.Errorf("%s should be greater than %s", gt, lt) + } + if !lt.LessThan(*gt) { + t.Errorf("%s should be less than %s", lt, gt) + } + if !lt.Equal(*lt) { + t.Errorf("%s should be equal to %s", lt, lt) + } + if lt.Compare(*gt) > 0 { + t.Errorf("%s should not be greater than %s", lt, gt) + } } } @@ -80,17 +116,17 @@ func testString(t *testing.T, orig string, version *Version) { func TestString(t *testing.T) { for _, v := range fixtures { - gt, err := NewVersion(v.greaterVersion) + gt, err := NewVersion(v.GreaterVersion) if err != nil { t.Error(err) } - testString(t, v.greaterVersion, gt) + testString(t, v.GreaterVersion, gt) - lt, err := NewVersion(v.lesserVersion) + lt, err := NewVersion(v.LesserVersion) if err != nil { t.Error(err) } - testString(t, v.lesserVersion, lt) + testString(t, v.LesserVersion, lt) } } @@ -141,7 +177,7 @@ func TestBumpMajor(t *testing.T) { version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpMajor() - if version.PreRelease != "" && 
version.PreRelease != "" { + if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } @@ -160,7 +196,7 @@ func TestBumpMinor(t *testing.T) { version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpMinor() - if version.PreRelease != "" && version.PreRelease != "" { + if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } @@ -183,7 +219,7 @@ func TestBumpPatch(t *testing.T) { version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpPatch() - if version.PreRelease != "" && version.PreRelease != "" { + if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } @@ -221,3 +257,117 @@ func TestMust(t *testing.T) { }() } } + +type fixtureJSON struct { + GreaterVersion *Version + LesserVersion *Version +} + +func TestJSON(t *testing.T) { + fj := make([]fixtureJSON, len(fixtures)) + for i, v := range fixtures { + var err error + fj[i].GreaterVersion, err = NewVersion(v.GreaterVersion) + if err != nil { + t.Fatal(err) + } + fj[i].LesserVersion, err = NewVersion(v.LesserVersion) + if err != nil { + t.Fatal(err) + } + } + + fromStrings, err := json.Marshal(fixtures) + if err != nil { + t.Fatal(err) + } + fromVersions, err := json.Marshal(fj) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fromStrings, fromVersions) { + t.Errorf("Expected: %s", fromStrings) + t.Errorf("Unexpected: %s", fromVersions) + } + + fromJson := make([]fixtureJSON, 0, len(fj)) + err = json.Unmarshal(fromStrings, &fromJson) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(fromJson, fj) { + t.Error("Expected: ", fj) + t.Error("Unexpected: ", fromJson) + } +} + +func TestYAML(t *testing.T) { + document, err := yaml.Marshal(fixtures) + if err != nil { + t.Fatal(err) + } + + expected := make([]fixtureJSON, len(fixtures)) + for i, v := range fixtures { + 
var err error + expected[i].GreaterVersion, err = NewVersion(v.GreaterVersion) + if err != nil { + t.Fatal(err) + } + expected[i].LesserVersion, err = NewVersion(v.LesserVersion) + if err != nil { + t.Fatal(err) + } + } + + fromYAML := make([]fixtureJSON, 0, len(fixtures)) + err = yaml.Unmarshal(document, &fromYAML) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(fromYAML, expected) { + t.Error("Expected: ", expected) + t.Error("Unexpected: ", fromYAML) + } +} + +func TestBadInput(t *testing.T) { + bad := []string{ + "1.2", + "1.2.3x", + "0x1.3.4", + "-1.2.3", + "1.2.3.4", + "0.88.0-11_e4e5dcabb", + "0.88.0+11_e4e5dcabb", + } + for _, b := range bad { + if _, err := NewVersion(b); err == nil { + t.Error("Improperly accepted value: ", b) + } + } +} + +func TestFlag(t *testing.T) { + v := Version{} + f := flag.NewFlagSet("version", flag.ContinueOnError) + f.Var(&v, "version", "set version") + + if err := f.Set("version", "1.2.3"); err != nil { + t.Fatal(err) + } + + if v.String() != "1.2.3" { + t.Errorf("Set wrong value %q", v) + } +} + +func ExampleVersion_LessThan() { + vA := New("1.2.3") + vB := New("3.2.1") + + fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB)) + // Output: + // 1.2.3 < 3.2.1 == true +} diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go index 86203007ae9f..e256b41a5ddf 100644 --- a/vendor/github.com/coreos/go-semver/semver/sort.go +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -1,3 +1,17 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package semver import ( diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml index 984e0736e7d2..82e742fa669a 100644 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -1,14 +1,29 @@ language: go +go_import_path: github.com/davecgh/go-spew go: - - 1.5.4 - - 1.6.3 - - 1.7 + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip +sudo: false install: - - go get -v golang.org/x/tools/cmd/cover + - go get -v github.com/alecthomas/gometalinter + - gometalinter --install script: - - go test -v -tags=safe ./spew - - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov + - export PATH=$PATH:$HOME/gopath/bin + - export GORACE="halt_on_error=1" + - test -z "$(gometalinter --disable-all + --enable=gofmt + --enable=golint + --enable=vet + --enable=gosimple + --enable=unconvert + --deadline=4m ./spew | tee /dev/stderr)" + - go test -v -race -tags safe ./spew + - go test -v -race -tags testcgo ./spew -covermode=atomic -coverprofile=profile.cov after_success: - go get -v github.com/mattn/goveralls - - export PATH=$PATH:$HOME/gopath/bin - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c836416192da..bc52e96f2b0e 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, 
and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md index 262430449b27..f6ed02c3b672 100644 --- a/vendor/github.com/davecgh/go-spew/README.md +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -1,12 +1,9 @@ go-spew ======= -[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] -(https://travis-ci.org/davecgh/go-spew) [![ISC License] -(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] -(https://img.shields.io/coveralls/davecgh/go-spew.svg)] -(https://coveralls.io/r/davecgh/go-spew?branch=master) - +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew) +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master) Go-spew implements a deep pretty printer for Go data structures to aid in debugging. 
A comprehensive suite of tests with 100% test coverage is provided @@ -21,8 +18,7 @@ post about it ## Documentation -[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] -(http://godoc.org/github.com/davecgh/go-spew/spew) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew) Full `go doc` style documentation for the project can be viewed online without installing this package by using the excellent GoDoc site here: diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a2d4..792994785e36 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. 
- // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. 
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. 
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5d10..205c28d68c47 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a728a..f78d89fc1f6c 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. 
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go index 5aad9c7af0bc..4a31a2ee37fe 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -768,7 +768,7 @@ func addUintptrDumpTests() { func addUnsafePointerDumpTests() { // Null pointer. - v := unsafe.Pointer(uintptr(0)) + v := unsafe.Pointer(nil) nv := (*unsafe.Pointer)(nil) pv := &v vAddr := fmt.Sprintf("%p", pv) diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go index 6ab180809a6d..108baa55f1db 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -82,18 +82,20 @@ func addCgoDumpTests() { v5Len := fmt.Sprintf("%d", v5l) v5Cap := fmt.Sprintf("%d", v5c) v5t := "[6]testdata._Ctype_uint8_t" + v5t2 := "[6]testdata._Ctype_uchar" v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + "{\n 00000000 74 65 73 74 35 00 " + " |test5.|\n}" - addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(v5, "("+v5t+") "+v5s+"\n", "("+v5t2+") "+v5s+"\n") // C typedefed unsigned char array. 
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() v6Len := fmt.Sprintf("%d", v6l) v6Cap := fmt.Sprintf("%d", v6c) v6t := "[6]testdata._Ctype_custom_uchar_t" + v6t2 := "[6]testdata._Ctype_uchar" v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + "{\n 00000000 74 65 73 74 36 00 " + " |test6.|\n}" - addDumpTest(v6, "("+v6t+") "+v6s+"\n") + addDumpTest(v6, "("+v6t+") "+v6s+"\n", "("+v6t2+") "+v6s+"\n") } diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bacbb8..b04edb7d7ac2 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go index f9b93abe86ba..87ee9651e363 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -1083,7 +1083,7 @@ func addUintptrFormatterTests() { func addUnsafePointerFormatterTests() { // Null pointer. 
- v := unsafe.Pointer(uintptr(0)) + v := unsafe.Pointer(nil) nv := (*unsafe.Pointer)(nil) pv := &v vAddr := fmt.Sprintf("%p", pv) @@ -1536,14 +1536,14 @@ func TestPrintSortedKeys(t *testing.T) { t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) } - s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + s = cfg.Sprint(map[testStruct]int{{1}: 1, {3}: 3, {2}: 2}) expected = "map[ts.1:1 ts.2:2 ts.3:3]" if s != expected { t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) } if !spew.UnsafeDisabled { - s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + s = cfg.Sprint(map[testStructP]int{{1}: 1, {3}: 3, {2}: 2}) expected = "map[ts.1:1 ts.2:2 ts.3:3]" if s != expected { t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go index 20a9cfefc617..e312b4fadc07 100644 --- a/vendor/github.com/davecgh/go-spew/spew/internal_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -36,10 +36,7 @@ type dummyFmtState struct { } func (dfs *dummyFmtState) Flag(f int) bool { - if f == int('+') { - return true - } - return false + return f == int('+') } func (dfs *dummyFmtState) Precision() (int, bool) { diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go index a0c612ec3d56..80dc22177db2 100644 --- a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -16,7 +16,7 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. 
-// +build !js,!appengine,!safe,!disableunsafe +// +build !js,!appengine,!safe,!disableunsafe,go1.4 /* This test file is part of the spew package rather than than the spew_test @@ -30,7 +30,6 @@ import ( "bytes" "reflect" "testing" - "unsafe" ) // changeKind uses unsafe to intentionally change the kind of a reflect.Value to @@ -38,13 +37,13 @@ import ( // fallback code which punts to the standard fmt library for new types that // might get added to the language. func changeKind(v *reflect.Value, readOnly bool) { - rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) - *rvf = *rvf | ((1<= 200 && k < 300 { + responseCodes = append(responseCodes, k) } } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } return o.Responses.Default, 0, false } @@ -99,10 +118,7 @@ func (o *Operation) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } - if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &o.VendorExtensible) } // MarshalJSON converts this items object to JSON @@ -216,7 +232,7 @@ func (o *Operation) AddParam(param *Parameter) *Operation { // RemoveParam removes a parameter from the operation func (o *Operation) RemoveParam(name, in string) *Operation { for i, p := range o.Parameters { - if p.Name == name && p.In == name { + if p.Name == name && p.In == in { o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) 
return o } @@ -257,3 +273,126 @@ func (o *Operation) RespondsWith(code int, response *Response) *Operation { o.Responses.StatusCodeResponses[code] = *response return o } + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + 
} + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/operation_test.go b/vendor/github.com/go-openapi/spec/operation_test.go index 0bc721d85a06..3ed427e48f80 100644 --- a/vendor/github.com/go-openapi/spec/operation_test.go +++ b/vendor/github.com/go-openapi/spec/operation_test.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "testing" @@ -56,7 +58,7 @@ var operation = Operation{ }, } -var operationJSON = `{ +const operationJSON = `{ "description": "operation description", "x-framework": "go-swagger", "consumes": [ "application/json", "application/x-yaml" ], @@ -75,6 +77,179 @@ var operationJSON = `{ } }` +func TestSuccessResponse(t *testing.T) { + ope := &Operation{} + resp, n, f := ope.SuccessResponse() + assert.Nil(t, resp) + assert.Equal(t, 0, n) + assert.Equal(t, false, f) + + resp, n, f = operation.SuccessResponse() + if assert.NotNil(t, resp) { + assert.Equal(t, "void response", resp.Description) + } + assert.Equal(t, 0, n) + assert.Equal(t, 
false, f) + + err := json.Unmarshal([]byte(operationJSON), ope) + if !assert.Nil(t, err) { + t.FailNow() + } + ope = ope.RespondsWith(301, &Response{ + ResponseProps: ResponseProps{ + Description: "failure", + }, + }) + resp, n, f = ope.SuccessResponse() + if assert.NotNil(t, resp) { + assert.Equal(t, "void response", resp.Description) + } + assert.Equal(t, 0, n) + assert.Equal(t, false, f) + + ope = ope.RespondsWith(200, &Response{ + ResponseProps: ResponseProps{ + Description: "success", + }, + }) + + resp, n, f = ope.SuccessResponse() + if assert.NotNil(t, resp) { + assert.Equal(t, "success", resp.Description) + } + assert.Equal(t, 200, n) + assert.Equal(t, true, f) +} + +func TestOperationBuilder(t *testing.T) { + ope := NewOperation("").WithID("operationID") + ope = ope.RespondsWith(200, &Response{ + ResponseProps: ResponseProps{ + Description: "success", + }, + }). + WithDefaultResponse(&Response{ + ResponseProps: ResponseProps{ + Description: "default", + }, + }). + SecuredWith("scheme-name", "scope1", "scope2"). + WithConsumes("application/json"). + WithProduces("application/json"). + Deprecate(). + WithTags("this", "that"). + AddParam(nil). + AddParam(QueryParam("myQueryParam").Typed("integer", "int32")). + AddParam(QueryParam("myQueryParam").Typed("string", "hostname")). + AddParam(PathParam("myPathParam").Typed("string", "uuid")). + WithDescription("test operation"). + WithSummary("my summary"). 
+ WithExternalDocs("some doc", "https://www.example.com") + + jazon, _ := json.MarshalIndent(ope, "", " ") + assert.JSONEq(t, `{ + "operationId": "operationID", + "description": "test operation", + "summary": "my summary", + "externalDocs": { + "description": "some doc", + "url": "https://www.example.com" + }, + "security": [ + { + "scheme-name": [ + "scope1", + "scope2" + ] + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "this", + "that" + ], + "deprecated": true, + "parameters": [ + { + "type": "string", + "format": "hostname", + "name": "myQueryParam", + "in": "query" + }, + { + "type": "string", + "format": "uuid", + "name": "myPathParam", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "success" + }, + "default": { + "description": "default" + } + } + }`, string(jazon)) + + // check token lookup + token, err := ope.JSONLookup("responses") + assert.NoError(t, err) + jazon, _ = json.MarshalIndent(token, "", " ") + assert.JSONEq(t, `{ + "200": { + "description": "success" + }, + "default": { + "description": "default" + } + }`, string(jazon)) + + // check delete methods + ope = ope.RespondsWith(200, nil). + RemoveParam("myQueryParam", "query"). + RemoveParam("myPathParam", "path"). + RemoveParam("fakeParam", "query"). + Undeprecate(). 
+ WithExternalDocs("", "") + jazon, _ = json.MarshalIndent(ope, "", " ") + assert.JSONEq(t, `{ + "security": [ + { + "scheme-name": [ + "scope1", + "scope2" + ] + } + ], + "description": "test operation", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "this", + "that" + ], + "summary": "my summary", + "operationId": "operationID", + "responses": { + "default": { + "description": "default" + } + } + }`, string(jazon)) +} + func TestIntegrationOperation(t *testing.T) { var actual Operation if assert.NoError(t, json.Unmarshal([]byte(operationJSON), &actual)) { @@ -103,5 +278,99 @@ func TestSecurityProperty(t *testing.T) { assert.Equal(t, securityContainsEmptyArray, props) } } +} +func TestOperationGobEncoding(t *testing.T) { + // 1. empty scope in security requirements: "security": [ { "apiKey": [] } ], + doTestOperationGobEncoding(t, operationJSON) + + // 2. nil security requirements + doTestOperationGobEncoding(t, `{ + "description": "operation description", + "x-framework": "go-swagger", + "consumes": [ "application/json", "application/x-yaml" ], + "produces": [ "application/json", "application/x-yaml" ], + "schemes": ["http", "https"], + "tags": ["dogs"], + "summary": "the summary of the operation", + "operationId": "sendCat", + "deprecated": true, + "parameters": [{"$ref":"Cat"}], + "responses": { + "default": { + "description": "void response" + } + } + }`) + + // 3. empty security requirement + doTestOperationGobEncoding(t, `{ + "description": "operation description", + "x-framework": "go-swagger", + "consumes": [ "application/json", "application/x-yaml" ], + "produces": [ "application/json", "application/x-yaml" ], + "schemes": ["http", "https"], + "tags": ["dogs"], + "security": [], + "summary": "the summary of the operation", + "operationId": "sendCat", + "deprecated": true, + "parameters": [{"$ref":"Cat"}], + "responses": { + "default": { + "description": "void response" + } + } + }`) + + // 4. 
non-empty security requirements + doTestOperationGobEncoding(t, `{ + "description": "operation description", + "x-framework": "go-swagger", + "consumes": [ "application/json", "application/x-yaml" ], + "produces": [ "application/json", "application/x-yaml" ], + "schemes": ["http", "https"], + "tags": ["dogs"], + "summary": "the summary of the operation", + "security": [ { "scoped-auth": [ "phone", "email" ] , "api-key": []} ], + "operationId": "sendCat", + "deprecated": true, + "parameters": [{"$ref":"Cat"}], + "responses": { + "default": { + "description": "void response" + } + } + }`) +} + +func doTestOperationGobEncoding(t *testing.T, fixture string) { + var src, dst Operation + + if !assert.NoError(t, json.Unmarshal([]byte(fixture), &src)) { + t.FailNow() + } + + doTestAnyGobEncoding(t, &src, &dst) +} + +func doTestAnyGobEncoding(t *testing.T, src, dst interface{}) { + expectedJSON, _ := json.MarshalIndent(src, "", " ") + + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(src) + if !assert.NoError(t, err) { + t.FailNow() + } + + err = gob.NewDecoder(&b).Decode(dst) + if !assert.NoError(t, err) { + t.FailNow() + } + + jazon, err := json.MarshalIndent(dst, "", " ") + if !assert.NoError(t, err) { + t.FailNow() + } + assert.JSONEq(t, string(expectedJSON), string(jazon)) } diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index cb1a88d252a4..cecdff54568d 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -39,7 +39,8 @@ func PathParam(name string) *Parameter { // BodyParam creates a body parameter func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} } // FormDataParam creates a body 
parameter @@ -49,12 +50,15 @@ func FormDataParam(name string) *Parameter { // FileParam creates a body parameter func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} } // SimpleArrayParam creates a param for a simple array (string, int, date etc) func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} } // ParamRef creates a parameter that's a json reference @@ -65,25 +69,43 @@ func ParamRef(uri string) *Parameter { } // ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" type ParamProps struct { Description string `json:"description,omitempty"` Name string `json:"name,omitempty"` In string `json:"in,omitempty"` Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` // when in == "body" - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. 
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. 
-// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -99,7 +121,7 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == "$ref" { + if token == jsonRef { return &p.Ref, nil } @@ -148,7 +170,7 @@ func (p *Parameter) Typed(tpe, format string) *Parameter { // CollectionOf a fluent builder method for an array parameter func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = "array" + p.Type = jsonArray p.Items = items p.CollectionFormat = format return p @@ -270,10 +292,7 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.ParamProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.ParamProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/parameters_test.go b/vendor/github.com/go-openapi/spec/parameters_test.go index 043b859ed389..02b926680ac5 100644 --- a/vendor/github.com/go-openapi/spec/parameters_test.go +++ b/vendor/github.com/go-openapi/spec/parameters_test.go @@ -154,3 +154,11 @@ func TestParameterSerialization(t *testing.T) { `{"type":"object","in":"body","schema":{"type":"array","items":{"$ref":"Cat"}}}`) } + +func TestParameterGobEncoding(t *testing.T) { + var src, dst Parameter + if !assert.NoError(t, json.Unmarshal([]byte(parameterJSON), &src)) { + t.FailNow() + } + doTestAnyGobEncoding(t, &src, &dst) +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go index a8ae63ece52b..68fc8e90144e 100644 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -50,7 +50,7 @@ func (p PathItem) JSONLookup(token string) (interface{}, error) { if ex, ok := p.Extensions[token]; ok { return &ex, nil } - if token == 
"$ref" { + if token == jsonRef { return &p.Ref, nil } r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) @@ -65,10 +65,7 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { return err } - if err := json.Unmarshal(data, &p.PathItemProps); err != nil { - return err - } - return nil + return json.Unmarshal(data, &p.PathItemProps) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/path_item_test.go b/vendor/github.com/go-openapi/spec/path_item_test.go index 0cd76f602bfc..3c78e903c24a 100644 --- a/vendor/github.com/go-openapi/spec/path_item_test.go +++ b/vendor/github.com/go-openapi/spec/path_item_test.go @@ -58,7 +58,7 @@ var pathItem = PathItem{ }, } -var pathItemJSON = `{ +const pathItemJSON = `{ "$ref": "Dog", "x-framework": "go-swagger", "get": { "description": "get operation description" }, diff --git a/vendor/github.com/go-openapi/spec/paths_test.go b/vendor/github.com/go-openapi/spec/paths_test.go index 025ddf4c6fee..ff5626a684b7 100644 --- a/vendor/github.com/go-openapi/spec/paths_test.go +++ b/vendor/github.com/go-openapi/spec/paths_test.go @@ -30,7 +30,7 @@ var paths = Paths{ }, } -var pathsJSON = `{"x-framework":"go-swagger","/":{"$ref":"cats"}}` +const pathsJSON = `{"x-framework":"go-swagger","/":{"$ref":"cats"}}` func TestIntegrationPaths(t *testing.T) { var actual Paths diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 1405bfd8ee2b..08ff869b2fcd 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "net/http" "os" @@ -148,6 +150,28 @@ func (r *Ref) UnmarshalJSON(d []byte) error { return r.fromMap(v) } +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if 
err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + func (r *Ref) fromMap(v map[string]interface{}) error { if v == nil { return nil diff --git a/vendor/github.com/go-openapi/spec/ref_test.go b/vendor/github.com/go-openapi/spec/ref_test.go new file mode 100644 index 000000000000..7dc2f59e29b9 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref_test.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +// pin pointing go-swagger/go-swagger#1816 issue with cloning ref's +func TestCloneRef(t *testing.T) { + var b bytes.Buffer + src := MustCreateRef("#/definitions/test") + err := gob.NewEncoder(&b).Encode(&src) + if !assert.NoError(t, err) { + t.FailNow() + } + + var dst Ref + err = gob.NewDecoder(&b).Decode(&dst) + if !assert.NoError(t, err) { + t.FailNow() + } + + jazon, err := json.Marshal(dst) + if !assert.NoError(t, err) { + t.FailNow() + } + + assert.Equal(t, `{"$ref":"#/definitions/test"}`, string(jazon)) +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index 586db0d78017..27729c1d93b1 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -58,10 +58,7 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &r.VendorExtensible) } // MarshalJSON converts this items object to JSON diff --git a/vendor/github.com/go-openapi/spec/response_test.go b/vendor/github.com/go-openapi/spec/response_test.go index ba89a491f1b9..71578abaa8fe 100644 --- a/vendor/github.com/go-openapi/spec/response_test.go +++ b/vendor/github.com/go-openapi/spec/response_test.go @@ -34,7 +34,7 @@ var response = Response{ }, } -var responseJSON = `{ +const responseJSON = `{ "$ref": "Dog", "x-go-name": "PutDogExists", "description": "Dog exists", @@ -88,3 +88,26 @@ func TestJSONLookupResponse(t *testing.T) { return } } + +func TestResponseBuild(t *testing.T) { + resp := NewResponse(). + WithDescription("some response"). + WithSchema(new(Schema).Typed("object", "")). + AddHeader("x-header", ResponseHeader().Typed("string", "")). 
+ AddExample("application/json", `{"key":"value"}`) + jazon, _ := json.MarshalIndent(resp, "", " ") + assert.JSONEq(t, `{ + "description": "some response", + "schema": { + "type": "object" + }, + "headers": { + "x-header": { + "type": "string" + } + }, + "examples": { + "application/json": "{\"key\":\"value\"}" + } + }`, string(jazon)) +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index b9481e29bcdb..ce30d26e3244 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -89,7 +89,8 @@ func DateTimeProperty() *Schema { // MapProperty creates a map property func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} } // RefProperty creates a ref property @@ -155,54 +156,6 @@ func (r *SchemaURL) fromMap(v map[string]interface{}) error { return nil } -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, 
&sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } - // SchemaProps describes a JSON schema (draft 4) type SchemaProps struct { ID string `json:"id,omitempty"` @@ -351,7 +304,7 @@ func (s *Schema) AddType(tpe, format string) *Schema { // CollectionOf a fluent builder method for an array parameter func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{"array"} + s.Type = []string{jsonArray} s.Items = &SchemaOrArray{Schema: &items} return s } diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 000000000000..c34a96fa04e7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. 
+ circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext + loadDoc func(string) (json.RawMessage, error) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return r, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r, nil + } + + // Set a new root to resolve against + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) + if err != nil { + return nil, err + } + + return resolver, nil +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + refURL := ref.GetURL() + if refURL == nil { + return nil + } + + var res interface{} + var data interface{} + var err error + // 
Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeFileRef(ref, basePath) + debugLog("current ref is: %s", ref.String()) + debugLog("current ref normalized file: %s", baseRef.String()) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) + if !fromCache { + b, err := r.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, data) + } + + return data, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). 
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target +// Resolve is not in charge of following references, it only resolves ref by following its URL +// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them +// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("deref: unsupported type %T", input) + } + + curRef := ref.String() + if curRef != "" { + normalizedRef := normalizeFileRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_test.go b/vendor/github.com/go-openapi/spec/schema_test.go index b6e3dad37681..f96860131ca3 100644 --- a/vendor/github.com/go-openapi/spec/schema_test.go +++ b/vendor/github.com/go-openapi/spec/schema_test.go @@ -59,7 +59,7 @@ var schema = Schema{ SwaggerSchemaProps: SwaggerSchemaProps{ Discriminator: "not this", ReadOnly: true, - XML: &XMLObject{"sch", "io", "sw", true, true}, + XML: &XMLObject{Name: "sch", Namespace: "io", Prefix: "sw", Attribute: true, Wrapped: true}, ExternalDocs: &ExternalDocumentation{ Description: "the documentation etc", URL: "http://readthedocs.org/swagger", @@ -150,11 +150,11 @@ var schemaJSON = `{ func TestSchema(t *testing.T) { expected := map[string]interface{}{} - json.Unmarshal([]byte(schemaJSON), &expected) + 
_ = json.Unmarshal([]byte(schemaJSON), &expected) b, err := json.Marshal(schema) if assert.NoError(t, err) { var actual map[string]interface{} - json.Unmarshal(b, &actual) + _ = json.Unmarshal(b, &actual) assert.Equal(t, expected, actual) } @@ -207,6 +207,6 @@ func TestSchema(t *testing.T) { func BenchmarkSchemaUnmarshal(b *testing.B) { for i := 0; i < b.N; i++ { sch := &Schema{} - sch.UnmarshalJSON([]byte(schemaJSON)) + _ = sch.UnmarshalJSON([]byte(schemaJSON)) } } diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go index 9f1b454eac91..fe353842a6fc 100644 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -136,8 +136,5 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { return err } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil + return json.Unmarshal(data, &s.VendorExtensible) } diff --git a/vendor/github.com/go-openapi/spec/spec_test.go b/vendor/github.com/go-openapi/spec/spec_test.go index cdcb298261b7..52b36e5582c2 100644 --- a/vendor/github.com/go-openapi/spec/spec_test.go +++ b/vendor/github.com/go-openapi/spec/spec_test.go @@ -186,7 +186,7 @@ func Test_Issue69(t *testing.T) { // asserts all $ref expanded jazon, _ := json.MarshalIndent(sp, "", " ") - // assert all $ref maches "$ref": "#/definitions/something" + // assert all $ref match "$ref": "#/definitions/something" m := rex.FindAllStringSubmatch(string(jazon), -1) if assert.NotNil(t, m) { for _, matched := range m { diff --git a/vendor/github.com/go-openapi/spec/structs_test.go b/vendor/github.com/go-openapi/spec/structs_test.go index be231eba5483..d4b87f4faab2 100644 --- a/vendor/github.com/go-openapi/spec/structs_test.go +++ b/vendor/github.com/go-openapi/spec/structs_test.go @@ -31,48 +31,51 @@ func assertSerializeJSON(t testing.TB, 
actual interface{}, expected string) bool return assert.Equal(t, string(ser), expected) } -func assertParsesJSON(t testing.TB, actual string, expected interface{}) bool { - tpe := reflect.TypeOf(expected) - var pointed bool +func assertSerializeYAML(t testing.TB, actual interface{}, expected string) bool { + ser, err := yaml.Marshal(actual) + if err != nil { + return assert.Fail(t, "unable to marshal to yaml (%s): %#v", err, actual) + } + return assert.Equal(t, string(ser), expected) +} + +func derefTypeOf(expected interface{}) (tpe reflect.Type) { + tpe = reflect.TypeOf(expected) if tpe.Kind() == reflect.Ptr { tpe = tpe.Elem() + } + return +} + +func isPointed(expected interface{}) (pointed bool) { + tpe := reflect.TypeOf(expected) + if tpe.Kind() == reflect.Ptr { pointed = true } + return +} - parsed := reflect.New(tpe) +func assertParsesJSON(t testing.TB, actual string, expected interface{}) bool { + parsed := reflect.New(derefTypeOf(expected)) err := json.Unmarshal([]byte(actual), parsed.Interface()) if err != nil { return assert.Fail(t, "unable to unmarshal from json (%s): %s", err, actual) } act := parsed.Interface() - if !pointed { + if !isPointed(expected) { act = reflect.Indirect(parsed).Interface() } return assert.Equal(t, act, expected) } -func assertSerializeYAML(t testing.TB, actual interface{}, expected string) bool { - ser, err := yaml.Marshal(actual) - if err != nil { - return assert.Fail(t, "unable to marshal to yaml (%s): %#v", err, actual) - } - return assert.Equal(t, string(ser), expected) -} - func assertParsesYAML(t testing.TB, actual string, expected interface{}) bool { - tpe := reflect.TypeOf(expected) - var pointed bool - if tpe.Kind() == reflect.Ptr { - tpe = tpe.Elem() - pointed = true - } - parsed := reflect.New(tpe) + parsed := reflect.New(derefTypeOf(expected)) err := yaml.Unmarshal([]byte(actual), parsed.Interface()) if err != nil { return assert.Fail(t, "unable to unmarshal from yaml (%s): %s", err, actual) } act := 
parsed.Interface() - if !pointed { + if !isPointed(expected) { act = reflect.Indirect(parsed).Interface() } return assert.EqualValues(t, act, expected) diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 4586a21c8602..44722ffd5adc 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -15,6 +15,8 @@ package spec import ( + "bytes" + "encoding/gob" "encoding/json" "fmt" "strconv" @@ -24,7 +26,8 @@ import ( ) // Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. // // For more information: http://goo.gl/8us55a#swagger-object- type Swagger struct { @@ -67,17 +70,52 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + // SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required type 
SwaggerProps struct { ID string `json:"id,omitempty"` Consumes []string `json:"consumes,omitempty"` Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Schemes []string `json:"schemes,omitempty"` Swagger string `json:"swagger,omitempty"` Info *Info `json:"info,omitempty"` Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` // must start with a leading "/" - Paths *Paths `json:"paths"` // required + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` @@ -87,6 +125,98 @@ type SwaggerProps struct { ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = 
append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + // Dependencies represent a dependencies property type Dependencies map[string]SchemaOrStringArray @@ -244,9 +374,9 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { if single == nil { return nil } - switch single.(type) { + switch v := single.(type) { case string: - *s = StringOrArray([]string{single.(string)}) + *s = StringOrArray([]string{v}) return nil default: return fmt.Errorf("only string or array is allowed, not %T", single) diff --git a/vendor/github.com/go-openapi/spec/swagger_test.go b/vendor/github.com/go-openapi/spec/swagger_test.go index 87531b1f1da9..23aa5094cd03 100644 --- a/vendor/github.com/go-openapi/spec/swagger_test.go +++ b/vendor/github.com/go-openapi/spec/swagger_test.go @@ -18,9 +18,8 @@ import ( "encoding/json" "testing" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var spec = Swagger{ @@ -52,15 +51,15 @@ var spec = Swagger{ 
{"internalApiKey": {}}, }, Tags: []Tag{NewTag("pets", "", nil)}, - ExternalDocs: &ExternalDocumentation{"the name", "the url"}, + ExternalDocs: &ExternalDocumentation{Description: "the name", URL: "the url"}, }, - VendorExtensible: VendorExtensible{map[string]interface{}{ + VendorExtensible: VendorExtensible{Extensions: map[string]interface{}{ "x-some-extension": "vendor", "x-schemes": []interface{}{"unix", "amqp"}, }}, } -var specJSON = `{ +const specJSON = `{ "id": "http://localhost:3849/api-docs", "consumes": ["application/json", "application/x-yaml"], "produces": ["application/json"], @@ -71,7 +70,8 @@ var specJSON = `{ "name": "wordnik api team", "url": "http://developer.wordnik.com" }, - "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0 specification", + "description": "A sample API that uses a petstore as an example to demonstrate features in the swagger-2.0` + + ` specification", "license": { "name": "Creative Commons 4.0 International", "url": "http://creativecommons.org/licenses/by/4.0/" @@ -239,7 +239,7 @@ func assertSpecJSON(t testing.TB, specJSON []byte) bool { func TestSwaggerSpec_Serialize(t *testing.T) { expected := make(map[string]interface{}) - json.Unmarshal([]byte(specJSON), &expected) + _ = json.Unmarshal([]byte(specJSON), &expected) b, err := json.MarshalIndent(spec, "", " ") if assert.NoError(t, err) { var actual map[string]interface{} @@ -329,8 +329,7 @@ func TestOptionalSwaggerProps_Serialize(t *testing.T) { } } -func TestSecurityRequirements(t *testing.T) { - minimalJSONSpec := []byte(`{ +var minimalJSONSpec = []byte(`{ "swagger": "2.0", "info": { "version": "0.0.0", @@ -375,6 +374,7 @@ func TestSecurityRequirements(t *testing.T) { } }`) +func TestSecurityRequirements(t *testing.T) { var minimalSpec Swagger err := json.Unmarshal(minimalJSONSpec, &minimalSpec) if assert.NoError(t, err) { @@ -387,3 +387,19 @@ func TestSecurityRequirements(t *testing.T) { assert.Contains(t, sec[2], 
"queryKey") } } + +func TestSwaggerGobEncoding(t *testing.T) { + doTestSwaggerGobEncoding(t, specJSON) + + doTestSwaggerGobEncoding(t, string(minimalJSONSpec)) +} + +func doTestSwaggerGobEncoding(t *testing.T, fixture string) { + var src, dst Swagger + + if !assert.NoError(t, json.Unmarshal([]byte(fixture), &src)) { + t.FailNow() + } + + doTestAnyGobEncoding(t, &src, &dst) +} diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go index 25256c4beca0..faa3d3de1eb4 100644 --- a/vendor/github.com/go-openapi/spec/tag.go +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -30,10 +30,11 @@ type TagProps struct { // NewTag creates a new tag func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{description, name, externalDocs}} + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} } -// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). // It is not mandatory to have a Tag Object per tag used there. // // For more information: http://goo.gl/8us55a#tagObject diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 000000000000..aa12b56f6e49 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. + + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + 
nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/github.com/go-openapi/spec/xml_object_test.go b/vendor/github.com/go-openapi/spec/xml_object_test.go index fda3b1084152..da616a9b0248 100644 --- a/vendor/github.com/go-openapi/spec/xml_object_test.go +++ b/vendor/github.com/go-openapi/spec/xml_object_test.go @@ -57,7 
+57,13 @@ func TestXmlObject_Deserialize(t *testing.T) { } completed := `{"name":"the name","namespace":"the namespace","prefix":"the prefix","attribute":true,"wrapped":true}` - expected = XMLObject{"the name", "the namespace", "the prefix", true, true} + expected = XMLObject{ + Name: "the name", + Namespace: "the namespace", + Prefix: "the prefix", + Attribute: true, + Wrapped: true, + } actual = XMLObject{} if assert.NoError(t, json.Unmarshal([]byte(completed), &actual)) { assert.Equal(t, expected, actual) diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 4f888fbc8fa5..1dfa80a6fca9 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -34,21 +34,27 @@ type Fuzzer struct { nilChance float64 minElements int maxElements int + maxDepth int } // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, // RandSource, NilChance, or NumElements in any order. func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { f := &Fuzzer{ defaultFuzzFuncs: fuzzFuncMap{ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), }, fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(time.Now().UnixNano())), + r: rand.New(rand.NewSource(seed)), nilChance: .2, minElements: 1, maxElements: 10, + maxDepth: 100, } return f } @@ -136,6 +142,14 @@ func (f *Fuzzer) genShouldFill() bool { return f.r.Float64() > f.nilChance } +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + // Fuzz recursively fills all of obj's fields with something random. First // this tries to find a custom fuzz function (see Funcs). 
If there is no // custom function this tests whether the object implements fuzz.Interface and, @@ -144,17 +158,19 @@ func (f *Fuzzer) genShouldFill() bool { // fails, this will generate random values for all primitive fields and then // recurse for all non-primitives. // -// Not safe for cyclic or tree-like structs! +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. // -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. func (f *Fuzzer) Fuzz(obj interface{}) { v := reflect.ValueOf(obj) if v.Kind() != reflect.Ptr { panic("needed ptr!") } v = v.Elem() - f.doFuzz(v, 0) + f.fuzzWithContext(v, 0) } // FuzzNoCustom is just like Fuzz, except that any custom fuzz function for @@ -170,7 +186,7 @@ func (f *Fuzzer) FuzzNoCustom(obj interface{}) { panic("needed ptr!") } v = v.Elem() - f.doFuzz(v, flagNoCustomFuzz) + f.fuzzWithContext(v, flagNoCustomFuzz) } const ( @@ -178,69 +194,87 @@ const ( flagNoCustomFuzz uint64 = 1 << iota ) -func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + if !v.CanSet() { return } if flags&flagNoCustomFuzz == 0 { // Check for both pointer and non-pointer custom functions. 
- if v.CanAddr() && f.tryCustom(v.Addr()) { + if v.CanAddr() && fc.tryCustom(v.Addr()) { return } - if f.tryCustom(v) { + if fc.tryCustom(v) { return } } if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, f.r) + fn(v, fc.fuzzer.r) return } switch v.Kind() { case reflect.Map: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { v.Set(reflect.MakeMap(v.Type())) - n := f.genElementCount() + n := fc.fuzzer.genElementCount() for i := 0; i < n; i++ { key := reflect.New(v.Type().Key()).Elem() - f.doFuzz(key, 0) + fc.doFuzz(key, 0) val := reflect.New(v.Type().Elem()).Elem() - f.doFuzz(val, 0) + fc.doFuzz(val, 0) v.SetMapIndex(key, val) } return } v.Set(reflect.Zero(v.Type())) case reflect.Ptr: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { v.Set(reflect.New(v.Type().Elem())) - f.doFuzz(v.Elem(), 0) + fc.doFuzz(v.Elem(), 0) return } v.Set(reflect.Zero(v.Type())) case reflect.Slice: - if f.genShouldFill() { - n := f.genElementCount() + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() v.Set(reflect.MakeSlice(v.Type(), n, n)) for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) + fc.doFuzz(v.Index(i), 0) } return } v.Set(reflect.Zero(v.Type())) case reflect.Array: - if f.genShouldFill() { + if fc.fuzzer.genShouldFill() { n := v.Len() for i := 0; i < n; i++ { - f.doFuzz(v.Index(i), 0) + fc.doFuzz(v.Index(i), 0) } return } v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { - f.doFuzz(v.Field(i), 0) + fc.doFuzz(v.Field(i), 0) } case reflect.Chan: fallthrough @@ -255,20 +289,20 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) { // tryCustom searches for custom handlers, and returns true iff it finds a match // and successfully randomizes v. -func (f *Fuzzer) tryCustom(v reflect.Value) bool { +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { // First: see if we have a fuzz function for it. 
- doCustom, ok := f.fuzzFuncs[v.Type()] + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] if !ok { // Second: see if it can fuzz itself. if v.CanInterface() { intf := v.Interface() if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{f: f, Rand: f.r}) + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) return true } } // Finally: see if there is a default fuzz function. - doCustom, ok = f.defaultFuzzFuncs[v.Type()] + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] if !ok { return false } @@ -294,8 +328,8 @@ func (f *Fuzzer) tryCustom(v reflect.Value) bool { } doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - f: f, - Rand: f.r, + fc: fc, + Rand: fc.fuzzer.r, })}) return true } @@ -310,7 +344,7 @@ type Interface interface { // Continue can be passed to custom fuzzing functions to allow them to use // the correct source of randomness and to continue fuzzing their members. type Continue struct { - f *Fuzzer + fc *fuzzerContext // For convenience, Continue implements rand.Rand via embedding. // Use this for generating any randomness if you want your fuzzing @@ -325,7 +359,7 @@ func (c Continue) Fuzz(obj interface{}) { panic("needed ptr!") } v = v.Elem() - c.f.doFuzz(v, 0) + c.fc.doFuzz(v, 0) } // FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for @@ -338,7 +372,7 @@ func (c Continue) FuzzNoCustom(obj interface{}) { panic("needed ptr!") } v = v.Elem() - c.f.doFuzz(v, flagNoCustomFuzz) + c.fc.doFuzz(v, flagNoCustomFuzz) } // RandString makes a random string up to 20 characters long. 
The returned string diff --git a/vendor/github.com/google/gofuzz/fuzz_test.go b/vendor/github.com/google/gofuzz/fuzz_test.go index 65cbd8223de4..4059ea6febb5 100644 --- a/vendor/github.com/google/gofuzz/fuzz_test.go +++ b/vendor/github.com/google/gofuzz/fuzz_test.go @@ -364,7 +364,7 @@ func TestFuzz_noCustom(t *testing.T) { inner.Str = testPhrase }, ) - c := Continue{f: f, Rand: f.r} + c := Continue{fc: &fuzzerContext{fuzzer: f}, Rand: f.r} // Fuzzer.Fuzz() obj1 := Outer{} @@ -426,3 +426,47 @@ func TestFuzz_NumElements(t *testing.T) { return 4, len(obj.A) == 1 }) } + +func TestFuzz_Maxdepth(t *testing.T) { + type S struct { + S *S + } + + f := New().NilChance(0) + + f.MaxDepth(1) + for i := 0; i < 100; i++ { + obj := S{} + f.Fuzz(&obj) + + if obj.S != nil { + t.Errorf("Expected nil") + } + } + + f.MaxDepth(3) // field, ptr + for i := 0; i < 100; i++ { + obj := S{} + f.Fuzz(&obj) + + if obj.S == nil { + t.Errorf("Expected obj.S not nil") + } else if obj.S.S != nil { + t.Errorf("Expected obj.S.S nil") + } + } + + f.MaxDepth(5) // field, ptr, field, ptr + for i := 0; i < 100; i++ { + obj := S{} + f.Fuzz(&obj) + + if obj.S == nil { + t.Errorf("Expected obj.S not nil") + } else if obj.S.S == nil { + t.Errorf("Expected obj.S.S not nil") + } else if obj.S.S.S != nil { + t.Errorf("Expected obj.S.S.S nil") + } + } +} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 000000000000..8ec4fe9e9725 --- /dev/null +++ b/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/vendor/github.com/openshift/api/.travis.yml b/vendor/github.com/openshift/api/.travis.yml index 11cca1b0cd6d..494d91ee489f 100644 --- a/vendor/github.com/openshift/api/.travis.yml +++ b/vendor/github.com/openshift/api/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - "1.10" + - "1.11" install: - wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip 
diff --git a/vendor/github.com/openshift/api/Dockerfile.build b/vendor/github.com/openshift/api/Dockerfile.build new file mode 100644 index 000000000000..0e40b661917e --- /dev/null +++ b/vendor/github.com/openshift/api/Dockerfile.build @@ -0,0 +1,13 @@ +FROM fedora:28 + +ENV GOPATH=/go +ENV PATH=/go/bin:$PATH + +RUN dnf -y install make git unzip golang wget +RUN go get -u -v golang.org/x/tools/cmd/... +RUN wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip && \ + mkdir protoc && \ + unzip protoc-3.0.2-linux-x86_64.zip -d protoc/ && \ + mv protoc/bin/protoc /usr/bin && \ + rm -rf protoc + diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 94544587431b..1667fa5f9d71 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -1,6 +1,9 @@ all: build .PHONY: all +RUNTIME ?= podman +RUNTIME_IMAGE_NAME ?= openshift-api-generator + build: go build github.com/openshift/api/... .PHONY: build @@ -19,6 +22,10 @@ update-deps: hack/update-deps.sh .PHONY: update-deps +generate-with-container: Dockerfile.build + $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . 
+ $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make generate + generate: hack/update-deepcopy.sh hack/update-protobuf.sh diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go index a7870003aa1f..f8ec2bd1dd71 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -257,7 +257,7 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { } var map_RollingDeploymentStrategyParams = map[string]string{ - "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index fd5f2207ec9c..12c2d2788571 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -333,12 +333,15 @@ message RoleBindingRestrictionList { // field must be non-nil. message RoleBindingRestrictionSpec { // UserRestriction matches against user subjects. + // +nullable optional UserRestriction userrestriction = 1; // GroupRestriction matches against group subjects. 
+ // +nullable optional GroupRestriction grouprestriction = 2; // ServiceAccountRestriction matches against service-account subjects. + // +nullable optional ServiceAccountRestriction serviceaccountrestriction = 3; } diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index df165f6cdc59..3e619df0a7dc 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -460,12 +460,15 @@ type RoleBindingRestriction struct { // field must be non-nil. type RoleBindingRestrictionSpec struct { // UserRestriction matches against user subjects. + // +nullable UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` // GroupRestriction matches against group subjects. + // +nullable GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` // ServiceAccountRestriction matches against service-account subjects. 
+ // +nullable ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` } diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go index 84b0c1c1dd42..cc926d443149 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -367,10 +367,10 @@ func (GenericWebHookCause) SwaggerDoc() map[string]string { } var map_GenericWebHookEvent = map[string]string{ - "": "GenericWebHookEvent is the payload expected for a generic webhook post", - "type": "type is the type of source repository", - "git": "git is the git information if the Type is BuildSourceGit", - "env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.", + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container. ValueFrom is not supported.", "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", } @@ -446,7 +446,7 @@ func (ImageChangeCause) SwaggerDoc() map[string]string { } var map_ImageChangeTrigger = map[string]string{ - "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build", "from": "from is a reference to an ImageStreamTag that will trigger a build when updated It is optional. 
If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", "paused": "paused is true if this trigger is temporarily disabled. Optional.", diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index b5cbd222ef20..8508b5cd07ba 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -127,10 +127,10 @@ const ( // operator (eg: openshift-apiserver for the openshift-apiserver-operator). OperatorProgressing ClusterStatusConditionType = "Progressing" - // Failing indicates that the operator has encountered an error that is preventing it from working properly. - // The binary maintained by the operator (eg: openshift-apiserver for the openshift-apiserver-operator) may still be - // available, but the user intent cannot be fulfilled. - OperatorFailing ClusterStatusConditionType = "Failing" + // Degraded indicates that the operand is not functioning completely. An example of a degraded state + // would be if there should be 5 copies of the operand running but only 4 are running. It may still be available, + // but it is degraded + OperatorDegraded ClusterStatusConditionType = "Degraded" // Upgradeable indicates whether the operator is in a state that is safe to upgrade. 
When status is `False` // administrators should not upgrade their cluster and the message field should contain a human readable description diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 8d4cb7776edf..6ab92365b99c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -104,7 +104,7 @@ type ClusterVersionStatus struct { // conditions provides information about the cluster version. The condition // "Available" is set to true if the desiredUpdate has been reached. The // condition "Progressing" is set to true if an update is being applied. - // The condition "Failing" is set to true if an update is currently blocked + // The condition "Degraded" is set to true if an update is currently blocked // by a temporary or permanent error. Conditions are only valid for the // current desiredUpdate when metadata.generation is equal to // status.generation. @@ -158,6 +158,9 @@ type UpdateHistory struct { // image is a container image location that contains the update. This value // is always populated. Image string `json:"image"` + // verified indicates whether the provided update was properly verified + // before it was installed. If this is false the cluster may not be trusted. + Verified bool `json:"verified"` } // ClusterID is string RFC4122 uuid. @@ -202,6 +205,19 @@ type Update struct { // // +optional Image string `json:"image"` + // force allows an administrator to update to an image that has failed + // verification, does not appear in the availableUpdates list, or otherwise + // would be blocked by normal protections on update. This option should only + // be used when the authenticity of the provided image has been verified out + // of band because the provided image will run with full administrative access + // to the cluster. 
Do not use this flag with images that comes from unknown + // or potentially malicious sources. + // + // This flag does not override other forms of consistency checking that are + // required before a new update is deployed. + // + // +optional + Force bool `json:"force"` } // RetrievedUpdates reports whether available updates have been retrieved from diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 94d1a7a40176..a072f1382453 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -66,7 +66,7 @@ type FeatureGateEnabledDisabled struct { // // If you put an item in either of these lists, put your area and name on it so we can find owners. var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ - Default: &FeatureGateEnabledDisabled{ + Default: { Enabled: []string{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning @@ -76,11 +76,12 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ "LocalStorageCapacityIsolation", // sig-pod, sjenning }, }, - TechPreviewNoUpgrade: &FeatureGateEnabledDisabled{ + TechPreviewNoUpgrade: { Enabled: []string{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning "SupportPodPidsLimit", // sig-pod, sjenning + "CSIBlockVolume", // sig-storage, j-griffith }, Disabled: []string{ "LocalStorageCapacityIsolation", // sig-pod, sjenning diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index d9deb265120a..40e3f2c27948 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -41,7 +41,7 @@ type InfrastructureStatus struct { // value controls whether 
infrastructure automation such as service load // balancers, dynamic volume provisioning, machine creation and deletion, and // other integrations are enabled. If None, no infrastructure automation is - // enabled. Allowed values are "AWS", "Azure", "GCP", "Libvirt", + // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", // "OpenStack", "VSphere", and "None". Individual components may not support // all platforms, and must handle unrecognized platforms as None if they do // not support that platform. @@ -68,6 +68,9 @@ const ( // AzurePlatformType represents Microsoft Azure infrastructure. AzurePlatformType PlatformType = "Azure" + // BareMetalPlatformType represents managed bare metal infrastructure. + BareMetalPlatformType PlatformType = "BareMetal" + // GCPPlatformType represents Google Cloud Platform infrastructure. GCPPlatformType PlatformType = "GCP" diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index a65ced9592cd..48195e8b2faf 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -27,24 +27,24 @@ type SchedulerSpec struct { // The namespace for this configmap is openshift-config. // +optional Policy ConfigMapNameReference `json:"policy"` - // defaultNodeSelector helps set the cluster-wide default node selector to - // restrict pod placement to specific nodes. This is applied to the pods + // defaultNodeSelector helps set the cluster-wide default node selector to + // restrict pod placement to specific nodes. This is applied to the pods // created in all namespaces without a specified nodeSelector value. // For example, // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector // field in pod spec to "type=user-node,region=east" to all pods created - // in all namespaces. 
Namespaces having project-wide node selectors won't be - // impacted even if this field is set. This adds an annotation section to - // the namespace. - // For example, if a new namespace is created with + // in all namespaces. Namespaces having project-wide node selectors won't be + // impacted even if this field is set. This adds an annotation section to + // the namespace. + // For example, if a new namespace is created with // node-selector='type=user-node,region=east', // the annotation openshift.io/node-selector: type=user-node,region=east // gets added to the project. When the openshift.io/node-selector annotation // is set on the project the value is used in preference to the value we are setting - // for defaultNodeSelector field. + // for defaultNodeSelector field. // For instance, - // openshift.io/node-selector: "type=user-node,region=west" means - // that the default of "type=user-node,region=east" set in defaultNodeSelector + // openshift.io/node-selector: "type=user-node,region=west" means + // that the default of "type=user-node,region=east" set in defaultNodeSelector // would not be applied. // +optional DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 9d9703d70d0c..6619700722ee 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -149,7 +149,7 @@ func (GenericControllerConfig) SwaggerDoc() map[string]string { } var map_HTTPServingInfo = map[string]string{ - "": "HTTPServingInfo holds configuration for serving HTTP", + "": "HTTPServingInfo holds configuration for serving HTTP", "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. 
If zero, no limit.", "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", } @@ -492,7 +492,7 @@ var map_ClusterVersionStatus = map[string]string{ "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent from a previous version.", "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", - "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Failing\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. 
Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", } @@ -517,6 +517,7 @@ var map_Update = map[string]string{ "": "Update represents a release of the ClusterVersionOperator, referenced by the Image member.", "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "force": "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.", } func (Update) SwaggerDoc() map[string]string { @@ -530,6 +531,7 @@ var map_UpdateHistory = map[string]string{ "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", "version": "version is a semantic versioning identifying the update version. 
If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", "image": "image is a container image location that contains the update. This value is always populated.", + "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.", } func (UpdateHistory) SwaggerDoc() map[string]string { @@ -729,7 +731,7 @@ func (InfrastructureSpec) SwaggerDoc() map[string]string { var map_InfrastructureStatus = map[string]string{ "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", - "platform": "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "platform": "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery", "apiServerURL": "apiServerURL is a valid URL with scheme(http/https), address and port. apiServerURL can be used by components like kubelet on machines, to contact the `apisever` using the infrastructure provider rather than the kubernetes networking.", } @@ -1027,7 +1029,7 @@ func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { } var map_TokenConfig = map[string]string{ - "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are integer values:\n x < 0 Tokens time out is enabled but tokens never timeout unless configured per client (e.g. 
`-1`)\n x = 0 Tokens time out is disabled (default)\n x > 0 Tokens time out if there is no activity for x seconds\nThe current minimum allowed value for X is 300 (5 minutes)", } @@ -1056,7 +1058,7 @@ func (ProjectList) SwaggerDoc() map[string]string { } var map_ProjectSpec = map[string]string{ - "": "ProjectSpec holds the project creation configuration.", + "": "ProjectSpec holds the project creation configuration.", "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", } diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go index 781590987277..a31b6316cbca 100644 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -232,10 +232,10 @@ func (ImageStreamSpec) SwaggerDoc() map[string]string { } var map_ImageStreamStatus = map[string]string{ - "": "ImageStreamStatus contains information about the state of this image stream.", + "": "ImageStreamStatus contains information about the state of this image stream.", "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. 
This field may be empty if the administrator has not exposed the integrated registry externally.", - "tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", + "tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", } func (ImageStreamStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go index 8c6488156cea..75ee2a42b1bf 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -100,11 +100,11 @@ func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string { } var map_BuildDefaultsConfig = map[string]string{ - "": "BuildDefaultsConfig controls the default information for Builds", - "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", - "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", - "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", - "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "": "BuildDefaultsConfig controls the default information for Builds", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", "sourceStrategyDefaults": 
"sourceStrategyDefaults are default values that apply to builds using the source strategy.", "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", @@ -246,7 +246,7 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { } var map_EtcdStorageConfig = map[string]string{ - "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", + "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", "kubernetesStorageVersion": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", @@ -325,7 +325,7 @@ func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { } var map_HTTPServingInfo = map[string]string{ - "": "HTTPServingInfo holds configuration for serving HTTP", + "": "HTTPServingInfo holds configuration for serving HTTP", "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. 
If zero, no limit.", "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", } @@ -358,9 +358,9 @@ func (ImageConfig) SwaggerDoc() map[string]string { } var map_ImagePolicyConfig = map[string]string{ - "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", - "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", - "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", + "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", + "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. 
Set to -1 for unlimited.", "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", @@ -374,7 +374,7 @@ func (ImagePolicyConfig) SwaggerDoc() map[string]string { } var map_JenkinsPipelineConfig = map[string]string{ - "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored", "templateName": "TemplateName is the name of the default Jenkins template", @@ -467,12 +467,12 @@ func (LDAPQuery) SwaggerDoc() map[string]string { } var map_LDAPSyncConfig = map[string]string{ - "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync", - "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", - "bindDN": "BindDN is an optional DN to bind to the LDAP server with", - "bindPassword": "BindPassword is an optional password to bind with during the search phase.", - "insecure": "Insecure, if true, indicates the connection should not use TLS. 
Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync", + "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", + "bindDN": "BindDN is an optional DN to bind to the LDAP server with", + "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names", "rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members", "activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of", @@ -504,8 +504,8 @@ func (MasterAuthConfig) SwaggerDoc() map[string]string { } var 
map_MasterClients = map[string]string{ - "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", - "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", + "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", "openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", } @@ -565,7 +565,7 @@ func (MasterNetworkConfig) SwaggerDoc() map[string]string { } var map_MasterVolumeConfig = map[string]string{ - "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", + "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", "dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", } @@ -583,7 +583,7 @@ func (NamedCertificate) SwaggerDoc() map[string]string { } var map_NodeAuthConfig = map[string]string{ - "": "NodeAuthConfig holds authn/authz configuration options", + "": "NodeAuthConfig holds authn/authz configuration options", "authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", "authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", "authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. 
\"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", @@ -692,8 +692,8 @@ var map_OpenIDIdentityProvider = map[string]string{ "clientSecret": "ClientSecret is the oauth client secret", "extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", "extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", - "urls": "URLs to use to authenticate", - "claims": "Claims mappings", + "urls": "URLs to use to authenticate", + "claims": "Claims mappings", } func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { @@ -712,8 +712,8 @@ func (OpenIDURLs) SwaggerDoc() map[string]string { } var map_PodManifestConfig = map[string]string{ - "": "PodManifestConfig holds the necessary configuration options for using pod manifests", - "path": "Path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", + "": "PodManifestConfig holds the necessary configuration options for using pod manifests", + "path": "Path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", "fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", } @@ -722,7 +722,7 @@ func (PodManifestConfig) SwaggerDoc() map[string]string { } var map_PolicyConfig = map[string]string{ - "": "\n holds the necessary configuration options for", + "": "\n holds the necessary configuration options for", "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. 
THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", } @@ -731,7 +731,7 @@ func (PolicyConfig) SwaggerDoc() map[string]string { } var map_ProjectConfig = map[string]string{ - "": "\n holds the necessary configuration options for", + "": "\n holds the necessary configuration options for", "defaultNodeSelector": "DefaultNodeSelector holds default project node label selector", "projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", "projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", @@ -924,7 +924,7 @@ func (StringSourceSpec) SwaggerDoc() map[string]string { } var map_TokenConfig = map[string]string{ - "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. 
Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go index 0cd9f1dade89..9547ae2bea91 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -20,11 +20,11 @@ func (BuildControllerConfig) SwaggerDoc() map[string]string { } var map_BuildDefaultsConfig = map[string]string{ - "": "BuildDefaultsConfig controls the default information for Builds", - "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", - "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", - "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", - "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "": "BuildDefaultsConfig controls the default information for Builds", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.", "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. 
User can override a default label by providing a label with the same name in their Build/BuildConfig.", "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", @@ -121,7 +121,7 @@ func (IngressControllerConfig) SwaggerDoc() map[string]string { } var map_JenkinsPipelineConfig = map[string]string{ - "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored", "templateName": "templateName is the name of the default Jenkins template", diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index 0295798e275b..3f5af652f6f5 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -1,9 +1,8 @@ package v1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // MyOperatorResource is an example operator configuration type @@ -54,9 +53,10 @@ type OperatorSpec struct { // +optional LogLevel LogLevel `json:"logLevel"` - // operandSpecs provide customization for functional units within the component + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. 
// +optional - OperandSpecs []OperandSpec `json:"operandSpecs,omitempty"` + OperatorLogLevel LogLevel `json:"operatorLogLevel"` // unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override // it will end up overlaying in the following order: @@ -91,38 +91,6 @@ var ( TraceAll LogLevel = "TraceAll" ) -// ResourcePatch is a way to represent the patch you would issue to `kubectl patch` in the API -type ResourcePatch struct { - // type is the type of patch to apply: jsonmerge, strategicmerge - Type string `json:"type"` - // patch the patch itself - Patch string `json:"patch"` -} - -// OperandSpec holds information for customization of a particular functional unit - logically maps to a workload -type OperandSpec struct { - // name is the name of this unit. The operator must be aware of it. - Name string `json:"name"` - - // operandContainerSpecs are per-container options - // +optional - OperandContainerSpecs []OperandContainerSpec `json:"operandContainerSpecs,omitempty"` - - // unsupportedResourcePatches are applied to the workload resource for this unit. This is an unsupported - // workaround if anything needs to be modified on the workload that is not otherwise configurable. - // TODO Decide: alternatively, we could simply include a RawExtension which is used in place of the "normal" default manifest - // +optional - UnsupportedResourcePatches []ResourcePatch `json:"unsupportedResourcePatches,omitempty"` -} - -type OperandContainerSpec struct { - // name is the name of the container to modify - Name string `json:"name"` - - // resources are the requests and limits to place in the container. Nil means to accept the defaults. 
- Resources *corev1.ResourceRequirements `json:"resources,omitempty"` -} - type OperatorStatus struct { // observedGeneration is the last generation change you've dealt with // +optional @@ -165,8 +133,8 @@ var ( OperatorStatusTypeAvailable = "Available" // Progressing indicates that the operator is trying to transition the operand to a different state OperatorStatusTypeProgressing = "Progressing" - // Failing indicates that the operator (not the operand) is unable to fulfill the user intent - OperatorStatusTypeFailing = "Failing" + // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent + OperatorStatusTypeDegraded = "Degraded" // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the // current and desired states. OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" @@ -195,6 +163,11 @@ const ( type StaticPodOperatorSpec struct { OperatorSpec `json:",inline"` + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. 
+ ForceRedeploymentReason string `json:"forceRedeploymentReason"` + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api // -1 = unlimited, 0 or unset = 5 (default) FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index d697cf8f13ce..1dee7ca27ace 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -14,7 +14,7 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec AuthenticationSpec `json:"spec,omitempty"` + Spec AuthenticationSpec `json:"spec,omitempty"` // +optional Status AuthenticationStatus `json:"status,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index 6aa8edfc192f..92e74e5644a6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -11,7 +11,7 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec ConsoleSpec `json:"spec,omitempty"` + Spec ConsoleSpec `json:"spec,omitempty"` // +optional Status ConsoleStatus `json:"status,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index a3135120faec..83376ba4f955 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -14,18 +14,13 @@ type Etcd struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec EtcdSpec `json:"spec"` + Spec EtcdSpec `json:"spec"` // +optional Status EtcdStatus `json:"status"` } type EtcdSpec struct { StaticPodOperatorSpec 
`json:",inline"` - - // forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. - // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work - // this time instead of failing again on the same config. - ForceRedeploymentReason string `json:"forceRedeploymentReason"` } type EtcdStatus struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 79bcaf7e9e39..4e44b0116a8c 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -137,6 +137,16 @@ type NodePlacement struct { // // +optional NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` } // EndpointPublishingStrategyType is a way to publish ingress controller endpoints. 
diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index d0d14f3cc560..6b79a8e0c617 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -14,18 +14,13 @@ type KubeAPIServer struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec KubeAPIServerSpec `json:"spec"` + Spec KubeAPIServerSpec `json:"spec"` // +optional Status KubeAPIServerStatus `json:"status"` } type KubeAPIServerSpec struct { StaticPodOperatorSpec `json:",inline"` - - // forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. - // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work - // this time instead of failing again on the same config. - ForceRedeploymentReason string `json:"forceRedeploymentReason"` } type KubeAPIServerStatus struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go index 564be1ac09d8..36ecc5edadeb 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -14,18 +14,13 @@ type KubeControllerManager struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec KubeControllerManagerSpec `json:"spec"` + Spec KubeControllerManagerSpec `json:"spec"` // +optional Status KubeControllerManagerStatus `json:"status"` } type KubeControllerManagerSpec struct { StaticPodOperatorSpec `json:",inline"` - - // forceRedeploymentReason can be used to force the redeployment of the kube-controller-manager by providing a unique string. 
- // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work - // this time instead of failing again on the same config. - ForceRedeploymentReason string `json:"forceRedeploymentReason"` } type KubeControllerManagerStatus struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go index e26ee9c13e99..ec0487ccfcf0 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go @@ -14,7 +14,7 @@ type OpenShiftAPIServer struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec OpenShiftAPIServerSpec `json:"spec"` + Spec OpenShiftAPIServerSpec `json:"spec"` // +optional Status OpenShiftAPIServerStatus `json:"status"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go index bad14acbe3cd..57ebe8e3a602 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -14,7 +14,7 @@ type OpenShiftControllerManager struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec OpenShiftControllerManagerSpec `json:"spec"` + Spec OpenShiftControllerManagerSpec `json:"spec"` // +optional Status OpenShiftControllerManagerStatus `json:"status"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go index 0d08b804445a..69fa5fbe4cee 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go +++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go @@ -14,18 +14,13 @@ type KubeScheduler struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec 
KubeSchedulerSpec `json:"spec"` + Spec KubeSchedulerSpec `json:"spec"` // +optional Status KubeSchedulerStatus `json:"status"` } type KubeSchedulerSpec struct { StaticPodOperatorSpec `json:",inline"` - - // forceRedeploymentReason can be used to force the redeployment of the kube-scheduler by providing a unique string. - // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work - // this time instead of failing again on the same config. - ForceRedeploymentReason string `json:"forceRedeploymentReason"` } type KubeSchedulerStatus struct { diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go index 42a51dbc9a0e..e6bb242aa263 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go +++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go @@ -14,8 +14,10 @@ type ServiceCA struct { metav1.ObjectMeta `json:"metadata"` // +required - Spec ServiceCASpec `json:"spec"` + //spec holds user settable values for configuration + Spec ServiceCASpec `json:"spec"` // +optional + // status holds observed values from the cluster. They may not be overridden. 
Status ServiceCAStatus `json:"status"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go index 92e0098546e9..aa480d732278 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -14,7 +14,7 @@ type ServiceCatalogAPIServer struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec ServiceCatalogAPIServerSpec `json:"spec"` + Spec ServiceCatalogAPIServerSpec `json:"spec"` // +optional Status ServiceCatalogAPIServerStatus `json:"status"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index e05af9292f81..8974078f6152 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -1108,6 +1108,13 @@ func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1384,55 +1391,6 @@ func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperandContainerSpec) DeepCopyInto(out *OperandContainerSpec) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(corev1.ResourceRequirements) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandContainerSpec. 
-func (in *OperandContainerSpec) DeepCopy() *OperandContainerSpec { - if in == nil { - return nil - } - out := new(OperandContainerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperandSpec) DeepCopyInto(out *OperandSpec) { - *out = *in - if in.OperandContainerSpecs != nil { - in, out := &in.OperandContainerSpecs, &out.OperandContainerSpecs - *out = make([]OperandContainerSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.UnsupportedResourcePatches != nil { - in, out := &in.UnsupportedResourcePatches, &out.UnsupportedResourcePatches - *out = make([]ResourcePatch, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandSpec. -func (in *OperandSpec) DeepCopy() *OperandSpec { - if in == nil { - return nil - } - out := new(OperandSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { *out = *in @@ -1453,13 +1411,6 @@ func (in *OperatorCondition) DeepCopy() *OperatorCondition { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { *out = *in - if in.OperandSpecs != nil { - in, out := &in.OperandSpecs, &out.OperandSpecs - *out = make([]OperandSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) return @@ -1534,22 +1485,6 @@ func (in *ProxyConfig) DeepCopy() *ProxyConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ResourcePatch) DeepCopyInto(out *ResourcePatch) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePatch. -func (in *ResourcePatch) DeepCopy() *ResourcePatch { - if in == nil { - return nil - } - out := new(ResourcePatch) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 82391b3928a5..606f30c57712 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -46,26 +46,6 @@ func (NodeStatus) SwaggerDoc() map[string]string { return map_NodeStatus } -var map_OperandContainerSpec = map[string]string{ - "name": "name is the name of the container to modify", - "resources": "resources are the requests and limits to place in the container. Nil means to accept the defaults.", -} - -func (OperandContainerSpec) SwaggerDoc() map[string]string { - return map_OperandContainerSpec -} - -var map_OperandSpec = map[string]string{ - "": "OperandSpec holds information for customization of a particular functional unit - logically maps to a workload", - "name": "name is the name of this unit. The operator must be aware of it.", - "operandContainerSpecs": "operandContainerSpecs are per-container options", - "unsupportedResourcePatches": "unsupportedResourcePatches are applied to the workload resource for this unit. 
This is an unsupported workaround if anything needs to be modified on the workload that is not otherwise configurable.", -} - -func (OperandSpec) SwaggerDoc() map[string]string { - return map_OperandSpec -} - var map_OperatorCondition = map[string]string{ "": "OperatorCondition is just the standard condition fields.", } @@ -78,7 +58,7 @@ var map_OperatorSpec = map[string]string{ "": "OperatorSpec contains common fields operators need. It is intended to be anonymous included inside of the Spec struct for your particular operator.", "managementState": "managementState indicates whether and how the operator should manage the component", "logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.", - "operandSpecs": "operandSpecs provide customization for functional units within the component", + "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.", "unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides", "observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. 
It exists in spec because it is an input to the level for the operator", } @@ -99,20 +79,11 @@ func (OperatorStatus) SwaggerDoc() map[string]string { return map_OperatorStatus } -var map_ResourcePatch = map[string]string{ - "": "ResourcePatch is a way to represent the patch you would issue to `kubectl patch` in the API", - "type": "type is the type of patch to apply: jsonmerge, strategicmerge", - "patch": "patch the patch itself", -} - -func (ResourcePatch) SwaggerDoc() map[string]string { - return map_ResourcePatch -} - var map_StaticPodOperatorSpec = map[string]string{ - "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", - "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", - "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", + "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", + "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", + "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", } func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { @@ -120,7 +91,7 @@ func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { } var map_StaticPodOperatorStatus = map[string]string{ - "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", } @@ -217,14 +188,6 @@ func (EtcdList) SwaggerDoc() map[string]string { return map_EtcdList } -var map_EtcdSpec = map[string]string{ - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", -} - -func (EtcdSpec) SwaggerDoc() map[string]string { - return map_EtcdSpec -} - var map_EndpointPublishingStrategy = map[string]string{ "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS platform.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. 
The user must manually publish the ingress controller.", @@ -283,6 +246,7 @@ func (IngressControllerStatus) SwaggerDoc() map[string]string { var map_NodePlacement = map[string]string{ "": "NodePlacement describes node scheduling configuration for an ingress controller.", "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf unset, the default is:\n\n beta.kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", } func (NodePlacement) SwaggerDoc() map[string]string { @@ -307,14 +271,6 @@ func (KubeAPIServerList) SwaggerDoc() map[string]string { return map_KubeAPIServerList } -var map_KubeAPIServerSpec = map[string]string{ - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", -} - -func (KubeAPIServerSpec) SwaggerDoc() map[string]string { - return map_KubeAPIServerSpec -} - var map_KubeControllerManager = map[string]string{ "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.", } @@ -333,14 +289,6 @@ func (KubeControllerManagerList) SwaggerDoc() map[string]string { return map_KubeControllerManagerList } -var map_KubeControllerManagerSpec = map[string]string{ - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the kube-controller-manager by providing a unique string. 
This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", -} - -func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { - return map_KubeControllerManagerSpec -} - var map_AdditionalNetworkDefinition = map[string]string{ "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", "type": "type is the type of network The only supported value is NetworkTypeRaw", @@ -420,10 +368,10 @@ func (OVNKubernetesConfig) SwaggerDoc() map[string]string { } var map_OpenShiftSDNConfig = map[string]string{ - "": "OpenShiftSDNConfig configures the three openshift-sdn plugins", - "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", - "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", - "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + "": "OpenShiftSDNConfig configures the three openshift-sdn plugins", + "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", "useExternalOpenvswitch": "useExternalOpenvswitch tells the operator not to install openvswitch, because it will be provided separately. 
If set, you must provide it yourself.", } @@ -496,16 +444,10 @@ func (KubeSchedulerList) SwaggerDoc() map[string]string { return map_KubeSchedulerList } -var map_KubeSchedulerSpec = map[string]string{ - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the kube-scheduler by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", -} - -func (KubeSchedulerSpec) SwaggerDoc() map[string]string { - return map_KubeSchedulerSpec -} - var map_ServiceCA = map[string]string{ - "": "ServiceCA provides information to configure an operator to manage the service cert controllers", + "": "ServiceCA provides information to configure an operator to manage the service cert controllers", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (ServiceCA) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index 76b45b0568c3..59a145211eb4 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -113,7 +113,7 @@ func (OperatorStatus) SwaggerDoc() map[string]string { } var map_StaticPodOperatorStatus = map[string]string{ - "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "": "StaticPodOperatorStatus is status for controllers that manage static pods. 
There are different needs because individual node status must be tracked.", "latestAvailableDeploymentGeneration": "latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment", "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", } diff --git a/vendor/github.com/openshift/api/route/OWNERS b/vendor/github.com/openshift/api/route/OWNERS index 279009f7ae36..74038975d316 100644 --- a/vendor/github.com/openshift/api/route/OWNERS +++ b/vendor/github.com/openshift/api/route/OWNERS @@ -1,4 +1,5 @@ reviewers: - - danwinship - - dcbw + - ironcladlou - knobunc + - pravisankar + - Miciah diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go index b0ad233717e3..dde33a99432f 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -360,6 +360,10 @@ func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) i += copy(dAtA[i:], m.WildcardPolicy) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) return i, nil } @@ -598,6 +602,8 @@ func (m *RouteSpec) Size() (n int) { } l = len(m.WildcardPolicy) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -740,6 +746,7 @@ func (this *RouteSpec) String() string { `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "RoutePort", "RoutePort", 1) + `,`, `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "TLSConfig", 1) + `,`, `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, `}`, }, "") return s @@ -1770,6 +1777,35 @@ func (m *RouteSpec) Unmarshal(dAtA []byte) error { } m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2442,77 +2478,78 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1146 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0x8f, 0xff, 0xa6, 0x1e, 0xb7, 0x85, 0x0c, 0x94, 0xba, 0x91, 0x62, 0xa7, 0x7b, 0x40, 0x29, - 0x2a, 0xbb, 0x24, 0x14, 0xa8, 0x84, 0x38, 0xd4, 0x29, 0x82, 0x34, 0x4e, 0x1a, 0x8d, 0x2d, 0x2a, - 0xaa, 0x1e, 0x98, 0xec, 0x8e, 0xd7, 0x83, 0xed, 0x99, 0x65, 0x66, 0x9c, 0xe2, 0x0b, 0xaa, 0xc4, - 0x17, 0x28, 0x7c, 0x1a, 0x3e, 0x42, 0x8e, 0x3d, 0xf6, 0x80, 0x2c, 0x62, 0x8e, 0x7c, 0x83, 0x9c, - 0xd0, 0xcc, 0x8e, 0xbd, 0xeb, 0xc4, 0x49, 0x5d, 0xb8, 0xed, 0xbc, 0xf7, 0x7e, 0xbf, 0xf7, 0xe6, - 0xbd, 0xb7, 0xbf, 0x01, 0x9b, 0x21, 0x55, 0x9d, 0xc1, 0xa1, 0xeb, 0xf3, 0xbe, 0xc7, 0x23, 0xc2, - 0x64, 0x87, 0xb6, 0x95, 0x87, 0x23, 0xea, 0x09, 0x3e, 0x50, 0xc4, 0x3b, 0xda, 0xf4, 0x42, 0xc2, - 0x88, 0xc0, 0x8a, 0x04, 0x6e, 0x24, 0xb8, 0xe2, 0xf0, 0x76, 0x02, 0x71, 0xa7, 0x10, 0x17, 0x47, - 0xd4, 0x35, 0x10, 0xf7, 0x68, 0x73, 0xf5, 0xe3, 0x14, 0x6b, 0xc8, 0x43, 0xee, 0x19, 0xe4, 0xe1, - 0xa0, 0x6d, 0x4e, 0xe6, 0x60, 0xbe, 0x62, 0xc6, 0x55, 0xa7, 0x7b, 0x5f, 0xba, 0x94, 0x9b, 0xb4, - 0x3e, 0x17, 0xf3, 0xb2, 0xae, 0xde, 0x4b, 0x62, 0xfa, 0xd8, 0xef, 0x50, 
0x46, 0xc4, 0xd0, 0x8b, - 0xba, 0xa1, 0x36, 0x48, 0xaf, 0x4f, 0x14, 0x9e, 0x87, 0xfa, 0xfc, 0x22, 0x94, 0x18, 0x30, 0x45, - 0xfb, 0xc4, 0x93, 0x7e, 0x87, 0xf4, 0xf1, 0x39, 0xdc, 0xa7, 0x17, 0xe1, 0x06, 0x8a, 0xf6, 0x3c, - 0xca, 0x94, 0x54, 0xe2, 0x2c, 0xc8, 0xf9, 0x2d, 0x0b, 0x0a, 0x48, 0xb7, 0x00, 0xfe, 0x00, 0xae, - 0xe8, 0x8a, 0x02, 0xac, 0x70, 0x25, 0xb3, 0x9e, 0xd9, 0x28, 0x6f, 0x7d, 0xe2, 0xc6, 0x8c, 0x6e, - 0x9a, 0xd1, 0x8d, 0xba, 0xa1, 0x36, 0x48, 0x57, 0x47, 0xbb, 0x47, 0x9b, 0xee, 0xe3, 0xc3, 0x1f, - 0x89, 0xaf, 0xf6, 0x88, 0xc2, 0x75, 0x78, 0x3c, 0xaa, 0x2d, 0x8d, 0x47, 0x35, 0x90, 0xd8, 0xd0, - 0x94, 0x15, 0xee, 0x83, 0xbc, 0x8c, 0x88, 0x5f, 0xc9, 0x1a, 0xf6, 0xbb, 0xee, 0x1b, 0x67, 0xe2, - 0x9a, 0xca, 0x9a, 0x11, 0xf1, 0xeb, 0x57, 0x2d, 0x73, 0x5e, 0x9f, 0x90, 0xe1, 0x81, 0xdf, 0x81, - 0xa2, 0x54, 0x58, 0x0d, 0x64, 0x25, 0x67, 0x18, 0xdd, 0x85, 0x19, 0x0d, 0xaa, 0x7e, 0xdd, 0x72, - 0x16, 0xe3, 0x33, 0xb2, 0x6c, 0xce, 0xaf, 0x39, 0x70, 0xd5, 0xc4, 0xed, 0xb0, 0x50, 0x10, 0x29, - 0xe1, 0x3a, 0xc8, 0x77, 0xb8, 0x54, 0xa6, 0x2d, 0xa5, 0xa4, 0x94, 0x6f, 0xb9, 0x54, 0xc8, 0x78, - 0xe0, 0x16, 0x00, 0x26, 0x85, 0xd8, 0xc7, 0x7d, 0x62, 0x2e, 0x58, 0x4a, 0x9a, 0x81, 0xa6, 0x1e, - 0x94, 0x8a, 0x82, 0x3d, 0x00, 0x7c, 0xce, 0x02, 0xaa, 0x28, 0x67, 0xfa, 0x0a, 0xb9, 0x8d, 0xf2, - 0xd6, 0xfd, 0x45, 0xaf, 0x60, 0x4b, 0xdb, 0x9e, 0x10, 0x24, 0xd9, 0xa6, 0x26, 0x89, 0x52, 0xfc, - 0xb0, 0x05, 0xae, 0x3f, 0xa7, 0xbd, 0xc0, 0xc7, 0x22, 0x38, 0xe0, 0x3d, 0xea, 0x0f, 0x2b, 0x79, - 0x53, 0xe5, 0x5d, 0x8b, 0xbb, 0xfe, 0x64, 0xc6, 0x7b, 0x3a, 0xaa, 0xc1, 0x59, 0x4b, 0x6b, 0x18, - 0x11, 0x74, 0x86, 0x03, 0x7e, 0x0f, 0x6e, 0xc6, 0x37, 0xda, 0xc6, 0x8c, 0x33, 0xea, 0xe3, 0x9e, - 0x6e, 0x0a, 0xd3, 0x4d, 0x28, 0x18, 0xfa, 0x9a, 0xa5, 0xbf, 0x89, 0xe6, 0x87, 0xa1, 0x8b, 0xf0, - 0xce, 0x3f, 0x59, 0x70, 0x63, 0xee, 0x55, 0xe1, 0x57, 0x20, 0xaf, 0x86, 0x11, 0xb1, 0xe3, 0xb8, - 0x33, 0x19, 0x87, 0x2e, 0xf0, 0x74, 0x54, 0xbb, 0x35, 0x17, 0x64, 0xaa, 0x37, 0x30, 0xd8, 0x98, - 0xae, 0x4d, 
0x3c, 0xa7, 0x7b, 0xb3, 0x6b, 0x70, 0x3a, 0xaa, 0xcd, 0xf9, 0xb7, 0xdd, 0x29, 0xd3, - 0xec, 0xb2, 0xc0, 0x0f, 0x41, 0x51, 0x10, 0x2c, 0x39, 0x33, 0x4b, 0x58, 0x4a, 0x96, 0x0a, 0x19, - 0x2b, 0xb2, 0x5e, 0x78, 0x07, 0x2c, 0xf7, 0x89, 0x94, 0x38, 0x24, 0xb6, 0xf1, 0xef, 0xd8, 0xc0, - 0xe5, 0xbd, 0xd8, 0x8c, 0x26, 0x7e, 0x28, 0x00, 0xec, 0x61, 0xa9, 0x5a, 0x02, 0x33, 0x19, 0x17, - 0x4f, 0x6d, 0x3f, 0xcb, 0x5b, 0x1f, 0x2d, 0xf6, 0x4f, 0x6a, 0x44, 0xfd, 0x83, 0xf1, 0xa8, 0x06, - 0x1b, 0xe7, 0x98, 0xd0, 0x1c, 0x76, 0xe7, 0x8f, 0x0c, 0x28, 0x99, 0xc6, 0x35, 0xa8, 0x54, 0xf0, - 0xd9, 0x39, 0x2d, 0x70, 0x17, 0xcb, 0xab, 0xd1, 0x46, 0x09, 0xde, 0xb5, 0xb7, 0xbb, 0x32, 0xb1, - 0xa4, 0x74, 0x60, 0x0f, 0x14, 0xa8, 0x22, 0x7d, 0xdd, 0x7f, 0xbd, 0xf3, 0x1b, 0x8b, 0xee, 0x7c, - 0xfd, 0x9a, 0x25, 0x2d, 0xec, 0x68, 0x38, 0x8a, 0x59, 0x9c, 0x9f, 0x6c, 0xe5, 0x07, 0x5c, 0x28, - 0x18, 0x00, 0xa0, 0xb0, 0x08, 0x89, 0xd2, 0xa7, 0x37, 0xea, 0x98, 0x56, 0x46, 0x37, 0x56, 0x46, - 0x77, 0x87, 0xa9, 0xc7, 0xa2, 0xa9, 0x04, 0x65, 0x61, 0xf2, 0x33, 0xb5, 0xa6, 0x5c, 0x28, 0xc5, - 0xeb, 0xfc, 0x9e, 0xb7, 0x39, 0xb5, 0x1a, 0x2d, 0x20, 0x0f, 0xeb, 0x20, 0x1f, 0x61, 0xd5, 0xb1, - 0x0b, 0x37, 0x8d, 0x38, 0xc0, 0xaa, 0x83, 0x8c, 0x07, 0x36, 0x41, 0x56, 0x71, 0xab, 0x63, 0x5f, - 0x2c, 0xda, 0x90, 0xb8, 0x3a, 0x44, 0xda, 0x44, 0x10, 0xe6, 0x93, 0x3a, 0xb0, 0xc4, 0xd9, 0x16, - 0x47, 0x59, 0xc5, 0xe1, 0x8b, 0x0c, 0x58, 0xc1, 0x3d, 0x45, 0x04, 0xc3, 0x8a, 0xd4, 0xb1, 0xdf, - 0x25, 0x2c, 0x90, 0x95, 0xbc, 0xe9, 0xfa, 0x7f, 0x4e, 0x72, 0xcb, 0x26, 0x59, 0x79, 0x70, 0x96, - 0x19, 0x9d, 0x4f, 0x06, 0x1f, 0x81, 0x7c, 0xa4, 0x27, 0x51, 0x78, 0x3b, 0xcd, 0xd7, 0x5d, 0xae, - 0x5f, 0x31, 0x3d, 0xd2, 0xbd, 0x37, 0x1c, 0xf0, 0x1b, 0x90, 0x53, 0x3d, 0x59, 0x29, 0x2e, 0x4c, - 0xd5, 0x6a, 0x34, 0xb7, 0x39, 0x6b, 0xd3, 0xb0, 0xbe, 0x3c, 0x1e, 0xd5, 0x72, 0xad, 0x46, 0x13, - 0x69, 0x86, 0x39, 0x5a, 0xb8, 0xfc, 0xff, 0xb5, 0xd0, 0xa1, 0xa0, 0x9c, 0x7a, 0x5d, 0xe0, 0x53, - 0xb0, 0x4c, 0x63, 0x11, 0xaa, 0x64, 0x4c, 0xc7, 
0xbd, 0xb7, 0xd4, 0xf6, 0x44, 0x21, 0xac, 0x01, - 0x4d, 0x08, 0x9d, 0x5f, 0xc0, 0xfb, 0xf3, 0x66, 0xa3, 0xf7, 0xac, 0x4b, 0x59, 0x70, 0x76, 0x13, - 0x77, 0x29, 0x0b, 0x90, 0xf1, 0xe8, 0x08, 0x96, 0x3c, 0x51, 0xd3, 0x08, 0xf3, 0x38, 0x19, 0x0f, - 0x74, 0x40, 0xf1, 0x39, 0xa1, 0x61, 0x47, 0x99, 0x6d, 0x2c, 0xd4, 0x81, 0x16, 0xb3, 0x27, 0xc6, - 0x82, 0xac, 0xc7, 0xe1, 0xf6, 0xaa, 0xa2, 0xd9, 0xc1, 0x22, 0x80, 0x1e, 0x28, 0x49, 0xfd, 0x61, - 0x1e, 0xbf, 0x38, 0xf7, 0x8a, 0x65, 0x2e, 0x35, 0x27, 0x0e, 0x94, 0xc4, 0x68, 0x40, 0xc0, 0x64, - 0x73, 0xd0, 0x6e, 0xd3, 0x9f, 0x6d, 0x29, 0x53, 0xc0, 0xc3, 0xfd, 0x66, 0xec, 0x40, 0x49, 0x8c, - 0xf3, 0x67, 0x0e, 0x94, 0xa6, 0xd3, 0x84, 0xbb, 0xa0, 0xac, 0x88, 0xe8, 0x53, 0x86, 0xb5, 0x7e, - 0x9d, 0x79, 0x07, 0xca, 0xad, 0xc4, 0xa5, 0x27, 0xd7, 0x6a, 0x34, 0x53, 0x16, 0x33, 0xb9, 0x34, - 0x1a, 0x7e, 0x06, 0xca, 0x3e, 0x11, 0x8a, 0xb6, 0xa9, 0x8f, 0xd5, 0xa4, 0x31, 0xef, 0x4d, 0xc8, - 0xb6, 0x13, 0x17, 0x4a, 0xc7, 0xc1, 0x35, 0x90, 0xeb, 0x92, 0xa1, 0x15, 0xfd, 0xb2, 0x0d, 0xcf, - 0xed, 0x92, 0x21, 0xd2, 0x76, 0xf8, 0x25, 0xb8, 0xe6, 0xe3, 0x14, 0xd8, 0x8a, 0xfe, 0x0d, 0x1b, - 0x78, 0x6d, 0xfb, 0x41, 0x9a, 0x79, 0x36, 0x16, 0x3e, 0x03, 0x95, 0x80, 0x48, 0x65, 0x2b, 0x9c, - 0x09, 0xb5, 0xcf, 0xea, 0xba, 0xe5, 0xa9, 0x3c, 0xbc, 0x20, 0x0e, 0x5d, 0xc8, 0x00, 0x5f, 0x66, - 0xc0, 0x1a, 0x65, 0x92, 0xf8, 0x03, 0x41, 0xbe, 0x0e, 0x42, 0x92, 0xea, 0x8e, 0xfd, 0x1b, 0x8a, - 0x26, 0xc7, 0x23, 0x9b, 0x63, 0x6d, 0xe7, 0xb2, 0xe0, 0xd3, 0x51, 0xed, 0xf6, 0xa5, 0x01, 0xa6, - 0xe3, 0x97, 0x27, 0xac, 0x6f, 0x1c, 0x9f, 0x54, 0x97, 0x5e, 0x9d, 0x54, 0x97, 0x5e, 0x9f, 0x54, - 0x97, 0x5e, 0x8c, 0xab, 0x99, 0xe3, 0x71, 0x35, 0xf3, 0x6a, 0x5c, 0xcd, 0xbc, 0x1e, 0x57, 0x33, - 0x7f, 0x8d, 0xab, 0x99, 0x97, 0x7f, 0x57, 0x97, 0x9e, 0x66, 0x8f, 0x36, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x3b, 0x9e, 0x66, 0x71, 0xfc, 0x0b, 0x00, 0x00, + // 1164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 
0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xfa, 0x5f, 0xe2, 0x71, 0x1b, 0xc8, 0x40, 0xa9, 0x1b, 0x29, 0x76, 0xba, 0x07, 0x94, + 0xa2, 0xb2, 0x4b, 0x42, 0x81, 0x4a, 0x88, 0x43, 0x9d, 0x22, 0x48, 0xe3, 0xa4, 0xd1, 0xd8, 0xa2, + 0xa2, 0xea, 0x81, 0xc9, 0xee, 0x78, 0x3d, 0xd8, 0x9e, 0x5d, 0x66, 0xc6, 0x29, 0xbe, 0xa0, 0x4a, + 0x7c, 0x81, 0xf2, 0x6d, 0xb8, 0x73, 0xc9, 0xb1, 0xc7, 0x1e, 0x90, 0x45, 0xcc, 0x91, 0x6f, 0x90, + 0x13, 0x9a, 0xd9, 0xb1, 0x77, 0xed, 0x38, 0xa9, 0x8b, 0xb8, 0xed, 0xbc, 0xf7, 0xfb, 0xfd, 0xde, + 0x9b, 0xf7, 0xde, 0xbe, 0x01, 0xdb, 0x01, 0x95, 0xed, 0xfe, 0xb1, 0xe3, 0x85, 0x3d, 0x37, 0x8c, + 0x08, 0x13, 0x6d, 0xda, 0x92, 0x2e, 0x8e, 0xa8, 0xcb, 0xc3, 0xbe, 0x24, 0xee, 0xc9, 0xb6, 0x1b, + 0x10, 0x46, 0x38, 0x96, 0xc4, 0x77, 0x22, 0x1e, 0xca, 0x10, 0xde, 0x4e, 0x28, 0xce, 0x84, 0xe2, + 0xe0, 0x88, 0x3a, 0x9a, 0xe2, 0x9c, 0x6c, 0xaf, 0x7f, 0x9c, 0x52, 0x0d, 0xc2, 0x20, 0x74, 0x35, + 0xf3, 0xb8, 0xdf, 0xd2, 0x27, 0x7d, 0xd0, 0x5f, 0xb1, 0xe2, 0xba, 0xdd, 0xb9, 0x2f, 0x1c, 0x1a, + 0xea, 0xb0, 0x5e, 0xc8, 0xe7, 0x45, 0x5d, 0xbf, 0x97, 0x60, 0x7a, 0xd8, 0x6b, 0x53, 0x46, 0xf8, + 0xc0, 0x8d, 0x3a, 0x81, 0x32, 0x08, 0xb7, 0x47, 0x24, 0x9e, 0xc7, 0xfa, 0xfc, 0x32, 0x16, 0xef, + 0x33, 0x49, 0x7b, 0xc4, 0x15, 0x5e, 0x9b, 0xf4, 0xf0, 0x05, 0xde, 0xa7, 0x97, 0xf1, 0xfa, 0x92, + 0x76, 0x5d, 0xca, 0xa4, 0x90, 0x7c, 0x96, 0x64, 0xff, 0x96, 0x01, 0x79, 0xa4, 0x4a, 0x00, 0x7f, + 0x00, 0x2b, 0x2a, 0x23, 0x1f, 0x4b, 0x5c, 0xb6, 0x36, 0xad, 0xad, 0xd2, 0xce, 0x27, 0x4e, 0xac, + 0xe8, 0xa4, 0x15, 0x9d, 0xa8, 0x13, 0x28, 0x83, 0x70, 0x14, 0xda, 0x39, 0xd9, 0x76, 0x1e, 0x1f, + 0xff, 0x48, 0x3c, 0x79, 0x40, 0x24, 0xae, 0xc1, 0xd3, 0x61, 0x75, 0x69, 0x34, 0xac, 0x82, 0xc4, + 0x86, 0x26, 0xaa, 0xf0, 0x10, 0xe4, 0x44, 0x44, 0xbc, 0x72, 0x46, 0xab, 0xdf, 0x75, 0xde, 0xd8, + 0x13, 0x47, 0x67, 0xd6, 0x88, 0x88, 0x57, 0xbb, 0x66, 0x94, 0x73, 0xea, 0x84, 0xb4, 0x0e, 0xfc, + 0x0e, 0x14, 0x84, 0xc4, 0xb2, 0x2f, 0xca, 0x59, 0xad, 0xe8, 0x2c, 0xac, 0xa8, 0x59, 0xb5, 0x55, + 0xa3, 0x59, 
0x88, 0xcf, 0xc8, 0xa8, 0xd9, 0xbf, 0x66, 0xc1, 0x35, 0x8d, 0xdb, 0x63, 0x01, 0x27, + 0x42, 0xc0, 0x4d, 0x90, 0x6b, 0x87, 0x42, 0xea, 0xb2, 0x14, 0x93, 0x54, 0xbe, 0x0d, 0x85, 0x44, + 0xda, 0x03, 0x77, 0x00, 0xd0, 0x21, 0xf8, 0x21, 0xee, 0x11, 0x7d, 0xc1, 0x62, 0x52, 0x0c, 0x34, + 0xf1, 0xa0, 0x14, 0x0a, 0x76, 0x01, 0xf0, 0x42, 0xe6, 0x53, 0x49, 0x43, 0xa6, 0xae, 0x90, 0xdd, + 0x2a, 0xed, 0xdc, 0x5f, 0xf4, 0x0a, 0x26, 0xb5, 0xdd, 0xb1, 0x40, 0x12, 0x6d, 0x62, 0x12, 0x28, + 0xa5, 0x0f, 0x9b, 0x60, 0xf5, 0x39, 0xed, 0xfa, 0x1e, 0xe6, 0xfe, 0x51, 0xd8, 0xa5, 0xde, 0xa0, + 0x9c, 0xd3, 0x59, 0xde, 0x35, 0xbc, 0xd5, 0x27, 0x53, 0xde, 0xf3, 0x61, 0x15, 0x4e, 0x5b, 0x9a, + 0x83, 0x88, 0xa0, 0x19, 0x0d, 0xf8, 0x3d, 0xb8, 0x19, 0xdf, 0x68, 0x17, 0xb3, 0x90, 0x51, 0x0f, + 0x77, 0x55, 0x51, 0x98, 0x2a, 0x42, 0x5e, 0xcb, 0x57, 0x8d, 0xfc, 0x4d, 0x34, 0x1f, 0x86, 0x2e, + 0xe3, 0xdb, 0xff, 0x64, 0xc0, 0x8d, 0xb9, 0x57, 0x85, 0x5f, 0x81, 0x9c, 0x1c, 0x44, 0xc4, 0xb4, + 0xe3, 0xce, 0xb8, 0x1d, 0x2a, 0xc1, 0xf3, 0x61, 0xf5, 0xd6, 0x5c, 0x92, 0xce, 0x5e, 0xd3, 0x60, + 0x7d, 0x32, 0x36, 0x71, 0x9f, 0xee, 0x4d, 0x8f, 0xc1, 0xf9, 0xb0, 0x3a, 0xe7, 0xdf, 0x76, 0x26, + 0x4a, 0xd3, 0xc3, 0x02, 0x3f, 0x04, 0x05, 0x4e, 0xb0, 0x08, 0x99, 0x1e, 0xc2, 0x62, 0x32, 0x54, + 0x48, 0x5b, 0x91, 0xf1, 0xc2, 0x3b, 0x60, 0xb9, 0x47, 0x84, 0xc0, 0x01, 0x31, 0x85, 0x7f, 0xc7, + 0x00, 0x97, 0x0f, 0x62, 0x33, 0x1a, 0xfb, 0x21, 0x07, 0xb0, 0x8b, 0x85, 0x6c, 0x72, 0xcc, 0x44, + 0x9c, 0x3c, 0x35, 0xf5, 0x2c, 0xed, 0x7c, 0xb4, 0xd8, 0x3f, 0xa9, 0x18, 0xb5, 0x0f, 0x46, 0xc3, + 0x2a, 0xac, 0x5f, 0x50, 0x42, 0x73, 0xd4, 0xed, 0xdf, 0x2d, 0x50, 0xd4, 0x85, 0xab, 0x53, 0x21, + 0xe1, 0xb3, 0x0b, 0xbb, 0xc0, 0x59, 0x2c, 0xae, 0x62, 0xeb, 0x4d, 0xf0, 0xae, 0xb9, 0xdd, 0xca, + 0xd8, 0x92, 0xda, 0x03, 0x07, 0x20, 0x4f, 0x25, 0xe9, 0xa9, 0xfa, 0xab, 0x99, 0xdf, 0x5a, 0x74, + 0xe6, 0x6b, 0xd7, 0x8d, 0x68, 0x7e, 0x4f, 0xd1, 0x51, 0xac, 0x62, 0xff, 0x64, 0x32, 0x3f, 0x0a, + 0xb9, 0x84, 0x3e, 0x00, 0x12, 0xf3, 0x80, 0x48, 
0x75, 0x7a, 0xe3, 0x1e, 0x53, 0x9b, 0xd1, 0x89, + 0x37, 0xa3, 0xb3, 0xc7, 0xe4, 0x63, 0xde, 0x90, 0x9c, 0xb2, 0x20, 0xf9, 0x99, 0x9a, 0x13, 0x2d, + 0x94, 0xd2, 0xb5, 0xff, 0xc8, 0x99, 0x98, 0x6a, 0x1b, 0x2d, 0xb0, 0x1e, 0x36, 0x41, 0x2e, 0xc2, + 0xb2, 0x6d, 0x06, 0x6e, 0x82, 0x38, 0xc2, 0xb2, 0x8d, 0xb4, 0x07, 0x36, 0x40, 0x46, 0x86, 0x66, + 0x8f, 0x7d, 0xb1, 0x68, 0x41, 0xe2, 0xec, 0x10, 0x69, 0x11, 0x4e, 0x98, 0x47, 0x6a, 0xc0, 0x08, + 0x67, 0x9a, 0x21, 0xca, 0xc8, 0x10, 0xbe, 0xb0, 0xc0, 0x1a, 0xee, 0x4a, 0xc2, 0x19, 0x96, 0xa4, + 0x86, 0xbd, 0x0e, 0x61, 0xbe, 0x28, 0xe7, 0x74, 0xd5, 0xff, 0x73, 0x90, 0x5b, 0x26, 0xc8, 0xda, + 0x83, 0x59, 0x65, 0x74, 0x31, 0x18, 0x7c, 0x04, 0x72, 0x91, 0xea, 0x44, 0xfe, 0xed, 0x76, 0xbe, + 0xaa, 0x72, 0x6d, 0x45, 0xd7, 0x48, 0xd5, 0x5e, 0x6b, 0xc0, 0x6f, 0x40, 0x56, 0x76, 0x45, 0xb9, + 0xb0, 0xb0, 0x54, 0xb3, 0xde, 0xd8, 0x0d, 0x59, 0x8b, 0x06, 0xb5, 0xe5, 0xd1, 0xb0, 0x9a, 0x6d, + 0xd6, 0x1b, 0x48, 0x29, 0xcc, 0xd9, 0x85, 0xcb, 0xff, 0xc3, 0x2e, 0x74, 0x41, 0x51, 0xf4, 0x8f, + 0xfd, 0xb0, 0x87, 0x29, 0x2b, 0xaf, 0x68, 0xc1, 0x35, 0x23, 0x58, 0x6c, 0x8c, 0x1d, 0x28, 0xc1, + 0xd8, 0x14, 0x94, 0x52, 0xcf, 0x11, 0x7c, 0x0a, 0x96, 0x69, 0xbc, 0xb5, 0xca, 0x96, 0x6e, 0x91, + 0xfb, 0x96, 0x8f, 0x41, 0xb2, 0x52, 0x8c, 0x01, 0x8d, 0x05, 0xed, 0x5f, 0xc0, 0xfb, 0xf3, 0x9a, + 0xa9, 0x06, 0xb3, 0x43, 0x99, 0x3f, 0x3b, 0xba, 0xfb, 0x94, 0xf9, 0x48, 0x7b, 0x14, 0x82, 0x25, + 0x6f, 0xda, 0x04, 0xa1, 0x5f, 0x33, 0xed, 0x81, 0x36, 0x28, 0x3c, 0x27, 0x34, 0x68, 0x4b, 0x3d, + 0xbe, 0xf9, 0x1a, 0x50, 0xdb, 0xef, 0x89, 0xb6, 0x20, 0xe3, 0xb1, 0x43, 0x73, 0x55, 0xde, 0x68, + 0x63, 0xee, 0xeb, 0x52, 0xa9, 0x0f, 0xfd, 0x5a, 0x5a, 0x33, 0xa5, 0x1a, 0x3b, 0x50, 0x82, 0x51, + 0x04, 0x9f, 0x89, 0x46, 0xbf, 0xd5, 0xa2, 0x3f, 0x9b, 0x54, 0x26, 0x84, 0x87, 0x87, 0x8d, 0xd8, + 0x81, 0x12, 0x8c, 0xfd, 0x67, 0x16, 0x14, 0x27, 0xed, 0x87, 0xfb, 0xa0, 0x24, 0x09, 0xef, 0x51, + 0x86, 0xd5, 0xc2, 0x9b, 0x79, 0x38, 0x4a, 0xcd, 0xc4, 0xa5, 0x5a, 0xdd, 0xac, 0x37, 
0x52, 0x16, + 0xdd, 0xea, 0x34, 0x1b, 0x7e, 0x06, 0x4a, 0x1e, 0xe1, 0x92, 0xb6, 0xa8, 0x87, 0xe5, 0xb8, 0x30, + 0xef, 0x8d, 0xc5, 0x76, 0x13, 0x17, 0x4a, 0xe3, 0xe0, 0x06, 0xc8, 0x76, 0xc8, 0xc0, 0xbc, 0x12, + 0x25, 0x03, 0xcf, 0xee, 0x93, 0x01, 0x52, 0x76, 0xf8, 0x25, 0xb8, 0xee, 0xe1, 0x14, 0xd9, 0xbc, + 0x12, 0x37, 0x0c, 0xf0, 0xfa, 0xee, 0x83, 0xb4, 0xf2, 0x34, 0x16, 0x3e, 0x03, 0x65, 0x9f, 0x08, + 0x69, 0x32, 0x9c, 0x82, 0x9a, 0x77, 0x78, 0xd3, 0xe8, 0x94, 0x1f, 0x5e, 0x82, 0x43, 0x97, 0x2a, + 0xc0, 0x97, 0x16, 0xd8, 0xa0, 0x4c, 0x10, 0xaf, 0xcf, 0xc9, 0xd7, 0x7e, 0x40, 0x52, 0xd5, 0x31, + 0xbf, 0x4f, 0x41, 0xc7, 0x78, 0x64, 0x62, 0x6c, 0xec, 0x5d, 0x05, 0x3e, 0x1f, 0x56, 0x6f, 0x5f, + 0x09, 0xd0, 0x15, 0xbf, 0x3a, 0x60, 0x6d, 0xeb, 0xf4, 0xac, 0xb2, 0xf4, 0xea, 0xac, 0xb2, 0xf4, + 0xfa, 0xac, 0xb2, 0xf4, 0x62, 0x54, 0xb1, 0x4e, 0x47, 0x15, 0xeb, 0xd5, 0xa8, 0x62, 0xbd, 0x1e, + 0x55, 0xac, 0xbf, 0x46, 0x15, 0xeb, 0xe5, 0xdf, 0x95, 0xa5, 0xa7, 0x99, 0x93, 0xed, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x6b, 0xe5, 0x21, 0x65, 0x2d, 0x0c, 0x00, 0x00, } diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index c75f8bb025f4..00d9751333d5 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -121,7 +121,23 @@ message RouteSpec { // Must follow DNS952 subdomain conventions. optional string host = 1; - // Path that the router watches for, to route traffic for to the service. Optional + // subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. 
If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // + // +optional + optional string subdomain = 8; + + // path that the router watches for, to route traffic for to the service. Optional optional string path = 2; // to is an object the route should use as the primary backend. Only the Service kind diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 6c14ae7187bb..b7cee760ac3f 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -70,7 +70,23 @@ type RouteSpec struct { // chosen. // Must follow DNS952 subdomain conventions. Host string `json:"host" protobuf:"bytes,1,opt,name=host"` - // Path that the router watches for, to route traffic for to the service. Optional + // subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. 
This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // + // +optional + Subdomain string `json:"subdomain" protobuf:"bytes,8,opt,name=subdomain"` + + // path that the router watches for, to route traffic for to the service. Optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // to is an object the route should use as the primary backend. Only the Service kind diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index f0727d93fdef..7d0cb5e2b4f9 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -70,7 +70,8 @@ func (RoutePort) SwaggerDoc() map[string]string { var map_RouteSpec = map[string]string{ "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 1. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. 
Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", - "path": "Path that the router watches for, to route traffic for to the service. Optional", + "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + "path": "path that the router watches for, to route traffic for to the service. Optional", "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 1) is set to zero, no traffic will be sent to this backend.", "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. 
Use the weight field in RouteTargetReference object to specify relative preference.", "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index 0df3a30533b9..88ba36cdb86a 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -222,6 +222,7 @@ message SecurityContextConstraints { // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional + // +nullable repeated AllowedFlexVolume allowedFlexVolumes = 21; // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. @@ -239,11 +240,13 @@ message SecurityContextConstraints { // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional + // +nullable optional bool defaultAllowPrivilegeEscalation = 22; // AllowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional + // +nullable optional bool allowPrivilegeEscalation = 23; // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. @@ -267,10 +270,12 @@ message SecurityContextConstraints { // The users who have permissions to use this security context constraints // +optional + // +nullable repeated string users = 18; // The groups that have permission to use this security context constraints // +optional + // +nullable repeated string groups = 19; // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -289,6 +294,7 @@ message SecurityContextConstraints { // e.g. 
"foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional + // +nullable repeated string allowedUnsafeSysctls = 24; // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. @@ -299,6 +305,7 @@ message SecurityContextConstraints { // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. // +optional + // +nullable repeated string forbiddenSysctls = 25; } diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index ad3a3ca80591..06c48ce5f6bc 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -59,6 +59,7 @@ type SecurityContextConstraints struct { // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional + // +nullable AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` @@ -71,10 +72,12 @@ type SecurityContextConstraints struct { // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional + // +nullable DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` // AllowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. 
// +optional + // +nullable AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` @@ -93,9 +96,11 @@ type SecurityContextConstraints struct { // The users who have permissions to use this security context constraints // +optional + // +nullable Users []string `json:"users" protobuf:"bytes,18,rep,name=users"` // The groups that have permission to use this security context constraints // +optional + // +nullable Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -114,6 +119,7 @@ type SecurityContextConstraints struct { // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional + // +nullable AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered @@ -123,6 +129,7 @@ type SecurityContextConstraints struct { // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. 
// +optional + // +nullable ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"` } diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go index 3a6db5284c98..60a167915ed5 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -61,7 +61,7 @@ func (PodSecurityPolicyReviewSpec) SwaggerDoc() map[string]string { } var map_PodSecurityPolicyReviewStatus = map[string]string{ - "": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.", + "": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.", "allowedServiceAccounts": "allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.", } diff --git a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go index 850d7c34d6d8..9e96d0774c33 100644 --- a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go @@ -37,7 +37,7 @@ func (ExtensionsConfiguration) SwaggerDoc() map[string]string { } var map_FeaturesConfiguration = map[string]string{ - "": "FeaturesConfiguration defines various feature gates for the web console", + "": "FeaturesConfiguration defines various feature gates for the web console", "inactivityTimeoutMinutes": "InactivityTimeoutMinutes is the number of minutes of inactivity before you are automatically logged out of the web console (optional). 
If set to 0, inactivity timeout is disabled.", "clusterResourceOverridesEnabled": "ClusterResourceOverridesEnabled indicates that the cluster is configured for overcommit. When set to true, the web console will hide the CPU request, CPU limit, and memory request fields in its editors and skip validation on those fields. The memory limit field will still be displayed.", } diff --git a/vendor/github.com/openshift/client-go/.travis.yml b/vendor/github.com/openshift/client-go/.travis.yml index 4766d57576da..7f931bc71a2e 100644 --- a/vendor/github.com/openshift/client-go/.travis.yml +++ b/vendor/github.com/openshift/client-go/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - "1.10" + - "1.11" script: - make verify build build-examples diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index 848376b8545a..3b54e0b724b2 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: c28d9721958d348280bb01aa0ce5eac67c2466b33fdd6ef2f8867866c8edb354 -updated: 2019-04-02T10:03:17.957026307-04:00 +updated: 2019-04-09T11:21:59.220356961-05:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -47,7 +47,7 @@ imports: - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 - name: github.com/openshift/api - version: d2f01e7b77a6fc78b328db20285423838419fef7 + version: 275768afc8f66b79aeb12debf94ffc85255104ad subpackages: - apps/v1 - authorization/v1 @@ -113,7 +113,7 @@ imports: subpackages: - imports - name: google.golang.org/appengine - version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 subpackages: - internal - internal/base @@ -237,7 +237,7 @@ imports: - util/integer - util/retry - name: k8s.io/code-generator - version: 2bf47b06188b89cc41d68422aa52509f2cbb4cc8 + version: b7d0f818cc1e1fc12b9acc529a1c27835609cdf4 repo: 
https://github.com/openshift/kubernetes-code-generator.git - name: k8s.io/gengo version: 51747d6e00da1fc578d5a333a93bb2abcbce7a95 diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go index cce6306c3135..cb37958a2337 100755 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go +++ b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go @@ -35,6 +35,8 @@ func Run() error { manifestDir := flag.String("manifests-dir", "manifests", "the directory with existing CRD manifests") outputDir := flag.String("output-dir", "", "optional directory to output the kubebuilder CRDs. By default a temporary directory is used.") verifyOnly := flag.Bool("verify-only", false, "do not write files, only compare and return with return code 1 if dirty") + domain := flag.String("domain", "", "the domain appended to group names.") + repo := flag.String("repo", "", "the repository package name (optional).") flag.Parse() @@ -54,6 +56,23 @@ func Run() error { return fmt.Errorf("error creating temp directory: %v\n", err) } defer os.RemoveAll(tmpDir) + relTmpDir := tmpDir[len(pwd)+1:] + + // find repo in GOPATH + sep := string([]rune{os.PathSeparator}) + GOPATH := strings.TrimRight(os.Getenv("GOPATH"), sep) + if len(*repo) == 0 && len(GOPATH) > 0 && strings.HasPrefix(pwd, filepath.Join(GOPATH, "src")+sep) { + *repo = pwd[len(filepath.Join(GOPATH, "src")+sep):] + fmt.Printf("Derived repo %q from GOPATH and working directory.\n", *repo) + } + + // validate params + if len(*repo) == 0 { + return fmt.Errorf("repo cannot be empty. 
Run crd-schema-gen in GOPATH or specify repo explicitly.") + } + if len(*domain) == 0 { + return fmt.Errorf("domain cannot be empty.") + } // copy APIs to temp dir fmt.Printf("Copying vendor/github.com/openshift/api/config to temporary pkg/apis...\n") @@ -66,11 +85,16 @@ func Run() error { fmt.Print(string(out)) return err } + if err := ioutil.WriteFile(filepath.Join(tmpDir, "PROJECT"), []byte(fmt.Sprintf(` +domain: %s +repo: %s/%s +`, *domain, *repo, relTmpDir)), 0644); err != nil { + return err + } // generate kubebuilder KindGroupYaml manifests into temp dir g := crdgenerator.Generator{ RootPath: tmpDir, - Domain: "openshift.io", OutputDir: filepath.Join(tmpDir, "manifests"), SkipMapValidation: true, } @@ -105,11 +129,13 @@ func Run() error { // update existing manifests with validations of kubebuilder output dirty := false + noneFound := true for fn, withValidation := range fromKubebuilder { existingFileName, ok := existingFileNames[withValidation.KindGroup] if !ok { continue } + noneFound = false crd := existing[existingFileName] @@ -201,6 +227,18 @@ func Run() error { } } + if noneFound { + fmt.Printf("None of the found API types has a corresponding CRD manifest. 
These API types where found:\n\n") + for _, withValidation := range fromKubebuilder { + fmt.Printf(" %s\n", withValidation.KindGroup) + } + fmt.Printf("These CRDs were found:\n\n") + for existingKindGroup := range existingFileNames { + fmt.Printf(" %s\n", existingKindGroup) + } + return fmt.Errorf("no API type for found CRD manifests") + } + if *verifyOnly && dirty { return fmt.Errorf("verification failed") } diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock index 45f6368e88ea..483a42a5e9f5 100644 --- a/vendor/github.com/openshift/library-go/glide.lock +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -1,5 +1,5 @@ -hash: 1736e530e2f520e62cbada3a813317bb7112f3b7d1178696be8b041dfcea4e52 -updated: 2019-04-02T11:09:33.83488978-04:00 +hash: 834b42ac04c13e26423b9cddffbd75a093f4d889cffb059911fae65aac364c7b +updated: 2019-04-17T10:52:09.03874547-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -128,7 +128,7 @@ imports: - name: github.com/go-openapi/swag version: 5899d5c5e619fda5fa86e14795a835f473ca284c - name: github.com/gobuffalo/envy - version: 1e6702b55a7afe795770f86ce76c13836a591904 + version: 043cb4b8af871b49563291e32c66bb84378a60ac - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -136,8 +136,6 @@ imports: - proto - protoc-gen-gogo/descriptor - sortkeys -- name: github.com/golang/glog - version: 44145f04b68cf362d9c4df2182967c2275eaefed - name: github.com/golang/groupcache version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 subpackages: @@ -153,13 +151,13 @@ imports: - ptypes/struct - ptypes/timestamp - name: github.com/gonum/blas - version: 37e82626499e1df7c54aeaba0959fd6e7e8dc1e4 + version: f22b278b28ac9805aadd613a754a60c35b24ae69 subpackages: - blas64 - native - native/internal/math32 - name: github.com/gonum/floats - version: f74b330d45c56584a6ea7a27f5c64ea2900631e9 + version: 
c233463c7e827fd71a8cdb62dfda0e98f7c39ad5 - name: github.com/gonum/graph version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 subpackages: @@ -174,17 +172,17 @@ imports: - internal/ordered - simple - name: github.com/gonum/internal - version: e57e4534cf9b3b00ef6c0175f59d8d2d34f60914 + version: f884aa71402950fb2796dbea0d5aa9ef9cfad8ca subpackages: - asm/f32 - asm/f64 - name: github.com/gonum/lapack - version: 5ed4b826becd1807e09377508f51756586d1a98c + version: e4cdc5a0bff924bb10be88482e635bd40429f65e subpackages: - lapack64 - native - name: github.com/gonum/matrix - version: dd6034299e4242c9f0ea36735e6d4264dfcb3f9f + version: c518dec07be9a636c38a4650e217be059b5952ec subpackages: - mat64 - name: github.com/google/btree @@ -212,13 +210,11 @@ imports: - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/joho/godotenv - version: 6d367c18edf6ca7fd004efd6863e4c5728fa858e + version: 5c0e6c6ab1a0a9ef0a8822cba3a05d62f7dad941 - name: github.com/json-iterator/go version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 - name: github.com/jteeuwen/go-bindata version: a0ff2567cfb70903282db057e799fd826784d41d -- name: github.com/kr/fs - version: 2788f0dbd16903de03cb8186e5c7d97b69ad387b - name: github.com/mailru/easyjson version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d subpackages: @@ -238,7 +234,7 @@ imports: - name: github.com/NYTimes/gziphandler version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb - name: github.com/openshift/api - version: d2f01e7b77a6fc78b328db20285423838419fef7 + version: 7924f9106f8e132f4f33a0c7fb8841b49bfc2d83 subpackages: - apps - apps/v1 @@ -284,7 +280,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: ccdcda4156765b47c39f3f4d9722b66e38f49934 + version: 0255926f53935175fe90b8e7672c4c06c17d79e6 subpackages: - config/clientset/versioned - config/clientset/versioned/fake @@ -302,8 +298,6 @@ imports: version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: 
github.com/pkg/profile version: f6fe06335df110bcf1ed6d4e852b760bfc15beee -- name: github.com/pkg/sftp - version: 4d0e916071f68db74f8a73926335f809396d6b42 - name: github.com/prometheus/client_golang version: e7e903064f5e9eb5da98208bae10b475d4db0f8c subpackages: @@ -327,7 +321,7 @@ imports: - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/rogpeppe/go-internal - version: bb30e2a7ca3553a66c6a740eb5ebe72f9d35ef0e + version: 438578804ca6f31be148c27683afc419ce47c06e subpackages: - modfile - module @@ -337,10 +331,9 @@ imports: - name: github.com/sirupsen/logrus version: 89742aefa4b206dcf400792f3bd35b542998eb3b - name: github.com/spf13/afero - version: b28a7effac979219c2a2ed6205a4d70e4b1bcd02 + version: 588a75ec4f32903aa5e39a2619ba6a4631e28424 subpackages: - mem - - sftp - name: github.com/spf13/cobra version: c439c4fa093711d42e1b01acb1235b52004753c1 - name: github.com/spf13/pflag @@ -350,13 +343,6 @@ imports: subpackages: - bcrypt - blowfish - - curve25519 - - ed25519 - - ed25519/internal/edwards25519 - - internal/chacha20 - - internal/subtle - - poly1305 - - ssh - ssh/terminal - name: golang.org/x/net version: 0ed95abb35c445290478a5348a7b38bb154135fd @@ -403,7 +389,7 @@ imports: - go/ast/astutil - imports - name: google.golang.org/appengine - version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 subpackages: - internal - internal/base @@ -450,7 +436,7 @@ imports: - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 + version: 51d6538a90f86fe93ac480b35f37b2be17fef232 - name: k8s.io/api version: 5cb15d34447165a97c76ed5a60e4e99c8a01ecfe subpackages: @@ -489,7 +475,8 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: d002e88f6236312f0289d9d1deab106751718ff0 + version: 3c74db8dd172051b029f91536c681a1b43694809 + 
repo: https://github.com/openshift/kubernetes-apiextensions-apiserver subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 @@ -826,7 +813,7 @@ imports: - util/retry - util/workqueue - name: k8s.io/gengo - version: 4242d8e6c5dba56827bb7bcf14ad11cda38f3991 + version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 subpackages: - args - generator @@ -846,14 +833,15 @@ imports: - name: k8s.io/kube-openapi version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d subpackages: + - pkg/aggregator - pkg/builder - pkg/common - pkg/handler - pkg/util - pkg/util/proto - name: sigs.k8s.io/controller-tools - version: 43466124052c1a1aa7d8fd33624b00bc111fb7cf - repo: https://github.com/openshift/kubernetes-sigs-controller-tools.git + version: 4e23e49e5d401ca6ced86aa30262d0cf2488c504 + repo: https://github.com/openshift/kubernetes-sigs-controller-tools subpackages: - pkg/crd/generator - pkg/crd/util @@ -865,6 +853,6 @@ imports: version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 testImports: - name: vbom.ml/util - version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394 + version: efcd4e0f97874370259c7d93e12aad57911dea81 subpackages: - sortorder diff --git a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml index bb33eae1de91..8f5129a9b325 100644 --- a/vendor/github.com/openshift/library-go/glide.yaml +++ b/vendor/github.com/openshift/library-go/glide.yaml @@ -1,12 +1,11 @@ package: github.com/openshift/library-go import: - package: k8s.io/apimachinery + version: kubernetes-1.13.4 - package: k8s.io/api version: kubernetes-1.13.4 - package: k8s.io/apiserver version: kubernetes-1.13.4 -- package: k8s.io/apiextensions-apiserver - version: kubernetes-1.13.4 - package: k8s.io/kube-aggregator version: kubernetes-1.13.4 - package: k8s.io/client-go @@ -17,11 +16,15 @@ import: version: master # crd-schema-gen + # TODO: we need to this to get nullable patch, but we will replace this with new repo soon. 
+- package: k8s.io/apiextensions-apiserver + repo: https://github.com/openshift/kubernetes-apiextensions-apiserver + version: origin-4.1-kubernetes-1.13.4 - package: sigs.k8s.io/controller-tools - version: 43466124052c1a1aa7d8fd33624b00bc111fb7cf - repo: https://github.com/openshift/kubernetes-sigs-controller-tools.git + repo: https://github.com/openshift/kubernetes-sigs-controller-tools + version: origin-4.1-kubernetes-1.13.4 - package: k8s.io/gengo - version: 4242d8e6c5dba56827bb7bcf14ad11cda38f3991 + version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 # sig-master - needed for file observer - package: github.com/sigma/go-inotify diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go index 4ca9d1e16c66..e9ce7e96412c 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go @@ -132,7 +132,7 @@ func TestEnsureManifestsCreated(t *testing.T) { if !strings.Contains(out.String(), `no matches for kind "KubeAPIServerOperatorConfig"`) { t.Fatalf("expected error logged to output when verbose is on, got: %s\n", out.String()) } - if !strings.Contains(out.String(), `Created apiextensions.k8s.io/v1beta1`) { + if !strings.Contains(out.String(), `Created "0000_10_kube-apiserver-operator_01_config.crd.yaml" customresourcedefinitions.v1beta1.apiextensions.k8s.io`) { t.Fatalf("expected success logged to output when verbose is on, got: %s\n", out.String()) } } diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go index 8eb944ff235e..cec47ed26ad2 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -158,7 +158,7 @@ func create(ctx context.Context, manifests 
map[string]*unstructured.Unstructured gvk := manifests[path].GetObjectKind().GroupVersionKind() mappings, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { - errs[path] = fmt.Errorf("unable to get REST mapping: %v", err) + errs[path] = fmt.Errorf("unable to get REST mapping for %q: %v", path, err) reloadDiscovery = true continue } @@ -169,11 +169,13 @@ func create(ctx context.Context, manifests map[string]*unstructured.Unstructured _, err = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()).Create(manifests[path], metav1.CreateOptions{}) } + resourceString := mappings.Resource.Resource + "." + mappings.Resource.Version + "." + mappings.Resource.Group + "/" + manifests[path].GetName() + " -n " + manifests[path].GetNamespace() + // Resource already exists means we already succeeded // This should never happen as we remove already created items from the manifest list, unless the resource existed beforehand. if kerrors.IsAlreadyExists(err) { if options.Verbose { - fmt.Fprintf(options.StdErr, "Skipped %s as it already exists\n", mappings.Resource.String()) + fmt.Fprintf(options.StdErr, "Skipped %q %s as it already exists\n", path, resourceString) } delete(manifests, path) continue @@ -181,14 +183,14 @@ func create(ctx context.Context, manifests map[string]*unstructured.Unstructured if err != nil { if options.Verbose { - fmt.Fprintf(options.StdErr, "Failed to create %s: %v\n", mappings.Resource.String(), err) + fmt.Fprintf(options.StdErr, "Failed to create %q %s: %v\n", path, resourceString, err) } errs[path] = fmt.Errorf("failed to create: %v", err) continue } if options.Verbose { - fmt.Fprintf(options.StdErr, "Created %s\n", mappings.Resource.String()) + fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) } // Creation succeeded lets remove the manifest from the list to avoid creating it second time diff --git a/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go 
b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go index 214e45c0c70b..a247311057b9 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go +++ b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go @@ -8,6 +8,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" ) // GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, @@ -24,11 +26,11 @@ func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides *ClientConn applyClientConnectionOverrides(overrides, clientConfig) - t := &clientTransportOverrides{} + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} if overrides != nil { - t.maxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost } - clientConfig.WrapTransport = t.defaultClientTransport + clientConfig.WrapTransport = t.DefaultClientTransport return clientConfig, nil } @@ -49,11 +51,11 @@ func GetClientConfig(kubeConfigFile string, overrides *ClientConnectionOverrides } applyClientConnectionOverrides(overrides, clientConfig) - t := &clientTransportOverrides{} + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} if overrides != nil { - t.maxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost } - clientConfig.WrapTransport = t.defaultClientTransport + clientConfig.WrapTransport = t.DefaultClientTransport return clientConfig, nil } @@ -76,6 +78,7 @@ func applyClientConnectionOverrides(overrides *ClientConnectionOverrides, kubeCo kubeConfig.ContentConfig.ContentType = overrides.ContentType } + // TODO both of these default values look wrong // if we have no preferences at this point, claim that we accept both proto and json. We will get proto if the server supports it. // this is a slightly niggly thing. 
If the server has proto and our client does not (possible, but not super likely) then this fails. if len(kubeConfig.ContentConfig.AcceptContentTypes) == 0 { @@ -86,12 +89,13 @@ func applyClientConnectionOverrides(overrides *ClientConnectionOverrides, kubeCo } } -type clientTransportOverrides struct { - maxIdleConnsPerHost int +type ClientTransportOverrides struct { + WrapTransport func(rt http.RoundTripper) http.RoundTripper + MaxIdleConnsPerHost int } // defaultClientTransport sets defaults for a client Transport that are suitable for use by infrastructure components. -func (c *clientTransportOverrides) defaultClientTransport(rt http.RoundTripper) http.RoundTripper { +func (c ClientTransportOverrides) DefaultClientTransport(rt http.RoundTripper) http.RoundTripper { transport, ok := rt.(*http.Transport) if !ok { return rt @@ -104,29 +108,24 @@ func (c *clientTransportOverrides) defaultClientTransport(rt http.RoundTripper) // Hold open more internal idle connections transport.MaxIdleConnsPerHost = 100 - if c.maxIdleConnsPerHost > 0 { - transport.MaxIdleConnsPerHost = c.maxIdleConnsPerHost + if c.MaxIdleConnsPerHost > 0 { + transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost } - return transport + if c.WrapTransport == nil { + return transport + + } + return c.WrapTransport(transport) } // ClientConnectionOverrides allows overriding values for rest.Config not held in a kubeconfig. Most commonly used // for QPS. Empty values are not used. type ClientConnectionOverrides struct { - // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the - // default value of 'application/json'. This field will control all connections to the server used by a particular - // client. - AcceptContentTypes string - // ContentType is the content type used when sending data to the server from this client. - ContentType string - - // QPS controls the number of queries per second allowed for this connection. 
- QPS float32 - // Burst allows extra queries to accumulate when a client is exceeding its rate. - Burst int32 + configv1.ClientConnectionOverrides // MaxIdleConnsPerHost, if non-zero, controls the maximum idle (keep-alive) connections to keep per-host:port. // If zero, DefaultMaxIdleConnsPerHost is used. + // TODO roll this into the connection overrides in api MaxIdleConnsPerHost int } diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go index f02d3b49f05c..f28ef543f163 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go @@ -2,16 +2,17 @@ package helpers import ( "io/ioutil" - "net" - "net/http" - "time" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/client" ) +// TODO this file needs to collapse with pkg/config/client. We cannot safely delegate from this file because this one +// TODO uses JSON and other uses protobuf. + // GetKubeClientConfig loads in-cluster config if kubeConfigFile is empty or the file if not, then applies overrides. 
func GetKubeClientConfig(kubeClientConnection configv1.KubeClientConfig) (*rest.Config, error) { return GetKubeConfigOrInClusterConfig(kubeClientConnection.KubeConfig, kubeClientConnection.ConnectionOverrides) @@ -29,7 +30,7 @@ func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides configv1.Cl return nil, err } applyClientConnectionOverrides(overrides, clientConfig) - clientConfig.WrapTransport = DefaultClientTransport + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport return clientConfig, nil } @@ -48,7 +49,7 @@ func GetClientConfig(kubeConfigFile string, overrides configv1.ClientConnectionO return nil, err } applyClientConnectionOverrides(overrides, clientConfig) - clientConfig.WrapTransport = DefaultClientTransport + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport return clientConfig, nil } @@ -68,23 +69,3 @@ func applyClientConnectionOverrides(overrides configv1.ClientConnectionOverrides kubeConfig.ContentConfig.ContentType = overrides.ContentType } } - -// DefaultClientTransport sets defaults for a client Transport that are suitable -// for use by infrastructure components. -func DefaultClientTransport(rt http.RoundTripper) http.RoundTripper { - transport, ok := rt.(*http.Transport) - if !ok { - return rt - } - - // TODO: this should be configured by the caller, not in this method. - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - transport.Dial = dialer.Dial - // Hold open more internal idle connections - // TODO: this should be configured by the caller, not in this method. 
- transport.MaxIdleConnsPerHost = 100 - return transport -} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go index 0382849ac3cf..f3399e867d67 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -75,13 +75,13 @@ func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, ret := *(&config).DeepCopy() if ret.LeaseDuration.Duration == 0 { - ret.LeaseDuration.Duration = 120 * time.Second + ret.LeaseDuration.Duration = 60 * time.Second } if ret.RenewDeadline.Duration == 0 { - ret.RenewDeadline.Duration = 90 * time.Second + ret.RenewDeadline.Duration = 35 * time.Second } if ret.RetryPeriod.Duration == 0 { - ret.RetryPeriod.Duration = 20 * time.Second + ret.RetryPeriod.Duration = 10 * time.Second } if len(ret.Namespace) == 0 { if len(defaultNamespace) > 0 { diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go index af74b74d50cc..3869d5c2e925 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go @@ -1,17 +1,23 @@ package serving import ( + "context" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + "k8s.io/klog" configv1 "github.com/openshift/api/config/v1" operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" ) -func ToServerConfig(servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig 
operatorv1alpha1.DelegatedAuthorization, kubeConfigFile string) (*genericapiserver.Config, error) { +func ToServerConfig(ctx context.Context, servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization, + kubeConfigFile string) (*genericapiserver.Config, error) { scheme := runtime.NewScheme() metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) config := genericapiserver.NewConfig(serializer.NewCodecFactory(scheme)) @@ -20,23 +26,51 @@ func ToServerConfig(servingInfo configv1.HTTPServingInfo, authenticationConfig o if err != nil { return nil, err } + if err := servingOptions.ApplyTo(&config.SecureServing, &config.LoopbackClientConfig); err != nil { return nil, err } + var lastApplyErr error + + pollCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if !authenticationConfig.Disabled { authenticationOptions := genericapiserveroptions.NewDelegatingAuthenticationOptions() authenticationOptions.RemoteKubeConfigFile = kubeConfigFile - if err := authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { - return nil, err + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. 
+ err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authentication (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr } } if !authorizationConfig.Disabled { authorizationOptions := genericapiserveroptions.NewDelegatingAuthorizationOptions() authorizationOptions.RemoteKubeConfigFile = kubeConfigFile - if err := authorizationOptions.ApplyTo(&config.Authorization); err != nil { - return nil, err + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. + err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authorizationOptions.ApplyTo(&config.Authorization) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authorization (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr } } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go index 624be5ed4753..01ed53da636b 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -67,10 +67,11 @@ type ControllerBuilder struct { fileObserver fileobserver.Observer fileObserverReactorFn func(file string, action fileobserver.ActionType) error - startFunc StartFunc - componentName string - instanceIdentity string - observerInterval time.Duration + startFunc StartFunc + componentName string + componentNamespace string + instanceIdentity string + observerInterval time.Duration 
servingInfo *configv1.HTTPServingInfo authenticationConfig *operatorv1alpha1.DelegatedAuthentication @@ -114,6 +115,11 @@ func (b *ControllerBuilder) WithRestartOnChange(stopCh chan<- struct{}, starting return b } +func (b *ControllerBuilder) WithComponentNamespace(ns string) *ControllerBuilder { + b.componentNamespace = ns + return b +} + // WithLeaderElection adds leader election options func (b *ControllerBuilder) WithLeaderElection(leaderElection configv1.LeaderElection, defaultNamespace, defaultName string) *ControllerBuilder { if leaderElection.Disable { @@ -166,13 +172,13 @@ func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.C } kubeClient := kubernetes.NewForConfigOrDie(clientConfig) - namespace, err := b.getNamespace() + namespace, err := b.getComponentNamespace() if err != nil { - panic("unable to read the namespace") + klog.Warningf("unable to identify the current namespace for events: %v", err) } controllerRef, err := events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil) if err != nil { - panic(fmt.Sprintf("unable to obtain replicaset reference for events: %v", err)) + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) } eventRecorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(namespace), b.componentName, controllerRef) @@ -193,7 +199,7 @@ func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.C if b.kubeAPIServerConfigFile != nil { kubeConfig = *b.kubeAPIServerConfigFile } - serverConfig, err := serving.ToServerConfig(*b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) + serverConfig, err := serving.ToServerConfig(ctx, *b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) if err != nil { return err } @@ -246,12 +252,15 @@ func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.C return fmt.Errorf("exited") } -func (b *ControllerBuilder) getNamespace() (string, error) { 
+func (b *ControllerBuilder) getComponentNamespace() (string, error) { + if len(b.componentNamespace) > 0 { + return b.componentNamespace, nil + } nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err != nil { - return "", err + return "openshift-config-managed", err } - return string(nsBytes), err + return string(nsBytes), nil } func (b *ControllerBuilder) getClientConfig() (*rest.Config, error) { diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go index f982be3a7048..099b42a87c2e 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "k8s.io/klog" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/version" @@ -76,21 +77,12 @@ func (c *ControllerCommandConfig) NewCommand() *cobra.Command { return cmd } -func hasServiceServingCerts(certDir string) bool { - if _, err := os.Stat(filepath.Join(certDir, "tls.crt")); os.IsNotExist(err) { - return false - } - if _, err := os.Stat(filepath.Join(certDir, "tls.key")); os.IsNotExist(err) { - return false - } - return true -} - -// StartController runs the controller -func (c *ControllerCommandConfig) StartController(ctx context.Context) error { +// Config returns the configuration of this command. Use StartController if you don't need to customize the default operator. +// This method does not modify the receiver. 
+func (c *ControllerCommandConfig) Config() (*unstructured.Unstructured, *operatorv1alpha1.GenericOperatorConfig, []byte, error) { configContent, unstructuredConfig, err := c.basicFlags.ToConfigObj() if err != nil { - return err + return nil, nil, nil, err } config := &operatorv1alpha1.GenericOperatorConfig{} if unstructuredConfig != nil { @@ -99,10 +91,26 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { // force the config to our version to read it configCopy.SetGroupVersionKind(operatorv1alpha1.GroupVersion.WithKind("GenericOperatorConfig")) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configCopy.Object, config); err != nil { - return err + return nil, nil, nil, err } } + return unstructuredConfig, config, configContent, nil +} + +func hasServiceServingCerts(certDir string) bool { + if _, err := os.Stat(filepath.Join(certDir, "tls.crt")); os.IsNotExist(err) { + return false + } + if _, err := os.Stat(filepath.Join(certDir, "tls.key")); os.IsNotExist(err) { + return false + } + return true +} +// AddDefaultRotationToConfig starts the provided builder with the default rotation set (config + serving info). Use StartController if +// you do not need to customize the controller builder. This method modifies config with self-signed default cert locations if +// necessary. 
+func (c *ControllerCommandConfig) AddDefaultRotationToConfig(config *operatorv1alpha1.GenericOperatorConfig, configContent []byte) (map[string][]byte, []string, error) { certDir := "/var/run/secrets/serving-cert" observedFiles := []string{ @@ -134,7 +142,7 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { klog.Warningf("Using insecure, self-signed certificates") temporaryCertDir, err := ioutil.TempDir("", "serving-cert-") if err != nil { - return err + return nil, nil, err } signerName := fmt.Sprintf("%s-signer@%d", c.componentName, time.Now().Unix()) ca, err := crypto.MakeSelfSignedCA( @@ -145,7 +153,7 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { 0, ) if err != nil { - return err + return nil, nil, err } certDir = temporaryCertDir @@ -155,15 +163,15 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { // nothing can trust this, so we don't really care about hostnames servingCert, err := ca.MakeServerCert(sets.NewString("localhost"), 30) if err != nil { - return err + return nil, nil, err } if err := servingCert.WriteCertConfigFile(config.ServingInfo.CertFile, config.ServingInfo.KeyFile); err != nil { - return err + return nil, nil, err } crtContent := &bytes.Buffer{} keyContent := &bytes.Buffer{} if err := servingCert.WriteCertConfig(crtContent, keyContent); err != nil { - return err + return nil, nil, err } // If we generate our own certificates, then we want to specify empty content to avoid a starting race. This way, @@ -172,9 +180,23 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { startingFileContent[filepath.Join(certDir, "tls.key")] = keyContent.Bytes() } } + return startingFileContent, observedFiles, nil +} + +// StartController runs the controller. This is the recommend entrypoint when you don't need +// to customize the builder. 
+func (c *ControllerCommandConfig) StartController(ctx context.Context) error { + unstructuredConfig, config, configContent, err := c.Config() + if err != nil { + return err + } + + startingFileContent, observedFiles, err := c.AddDefaultRotationToConfig(config, configContent) + if err != nil { + return err + } exitOnChangeReactorCh := make(chan struct{}) - ctx2 := context.Background() ctx2, cancel := context.WithCancel(ctx) go func() { select { @@ -185,10 +207,11 @@ func (c *ControllerCommandConfig) StartController(ctx context.Context) error { } }() - return NewController(c.componentName, c.startFunc). + builder := NewController(c.componentName, c.startFunc). WithKubeConfigFile(c.basicFlags.KubeConfigFile, nil). WithLeaderElection(config.LeaderElection, "", c.componentName+"-lock"). WithServer(config.ServingInfo, config.Authentication, config.Authorization). - WithRestartOnChange(exitOnChangeReactorCh, startingFileContent, observedFiles...). - Run(unstructuredConfig, ctx2) + WithRestartOnChange(exitOnChangeReactorCh, startingFileContent, observedFiles...) 
+ + return builder.Run(unstructuredConfig, ctx2) } diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 421a80026695..7919321f898e 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -794,7 +794,7 @@ func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } } diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go index 9bfb47935c6a..f0c854ef7f20 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go @@ -76,7 +76,7 @@ func newTestCACertificate(subject pkix.Name, serialNumber int64, validity metav1 KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } cert, err := signCertificate(caCert, caPublicKey, caCert, caPrivateKey) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go index c80a50ead000..9a773c2d4c2b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go @@ -50,6 +50,7 @@ func (c CABundleRotation) ensureConfigMapCABundle(signingCertKeyPair *crypto.CA) } if originalCABundleConfigMap == nil || originalCABundleConfigMap.Data == nil || !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { c.EventRecorder.Eventf("CABundleUpdateRequired", "%q in %q requires a new 
cert", c.Name, c.Namespace) + LabelAsManagedConfigMap(caBundleConfigMap, CertificateTypeCABundle) actualCABundleConfigMap, modified, err := resourceapply.ApplyConfigMap(c.Client, c.EventRecorder, caBundleConfigMap) if err != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go index 9f384d872045..d08a58a7b103 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go @@ -57,6 +57,9 @@ func TestEnsureConfigMapCABundle(t *testing.T) { } actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.ConfigMap) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } if len(actual.Data["ca-bundle.crt"]) == 0 { t.Error(actual.Data) } @@ -97,6 +100,9 @@ func TestEnsureConfigMapCABundle(t *testing.T) { if len(actual.Data["ca-bundle.crt"]) == 0 { t.Error(actual.Data) } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"])) if err != nil { t.Fatal(err) @@ -141,6 +147,9 @@ func TestEnsureConfigMapCABundle(t *testing.T) { if len(actual.Data["ca-bundle.crt"]) == 0 { t.Error(actual.Data) } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"])) if err != nil { t.Fatal(err) @@ -189,6 +198,9 @@ func TestEnsureConfigMapCABundle(t *testing.T) { if len(actual.Data["ca-bundle.crt"]) == 0 { t.Error(actual.Data) } + if certType, _ := 
CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"])) if err != nil { t.Fatal(err) @@ -256,7 +268,7 @@ func newTestCACertificate(subject pkix.Name, serialNumber int64, validity metav1 KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } cert, err := signCertificate(caCert, caPublicKey, caCert, caPrivateKey) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go index 4d2ecf3243d4..8f75dc7db9d7 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go @@ -43,10 +43,8 @@ type CertRotationController struct { TargetRotation TargetRotation OperatorClient v1helpers.StaticPodOperatorClient - cachesSynced []cache.InformerSynced - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface } func NewCertRotationController( @@ -56,7 +54,7 @@ func NewCertRotationController( targetRotation TargetRotation, operatorClient v1helpers.StaticPodOperatorClient, ) (*CertRotationController, error) { - ret := &CertRotationController{ + c := &CertRotationController{ name: name, SigningRotation: signingRotation, @@ -64,27 +62,25 @@ func NewCertRotationController( TargetRotation: targetRotation, OperatorClient: operatorClient, - cachesSynced: []cache.InformerSynced{ - signingRotation.Informer.Informer().HasSynced, - caBundleRotation.Informer.Informer().HasSynced, - 
targetRotation.Informer.Informer().HasSynced, - }, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), } - signingRotation.Informer.Informer().AddEventHandler(ret.eventHandler()) - caBundleRotation.Informer.Informer().AddEventHandler(ret.eventHandler()) - targetRotation.Informer.Informer().AddEventHandler(ret.eventHandler()) + signingRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + caBundleRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + targetRotation.Informer.Informer().AddEventHandler(c.eventHandler()) - return ret, nil + c.cachesToSync = append(c.cachesToSync, signingRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, caBundleRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, targetRotation.Informer.Informer().HasSynced) + + return c, nil } func (c CertRotationController) sync() error { syncErr := c.syncWorker() condition := operatorv1.OperatorCondition{ - Type: "CertRotation_" + c.name + "_Failing", + Type: "CertRotation_" + c.name + "_Degraded", Status: operatorv1.ConditionFalse, } if syncErr != nil { @@ -123,8 +119,7 @@ func (c *CertRotationController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting CertRotationController - %q", c.name) defer klog.Infof("Shutting down CertRotationController - %q", c.name) - - if !cache.WaitForCacheSync(stopCh, c.cachesSynced...) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) 
{ utilruntime.HandleError(fmt.Errorf("caches did not sync")) return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go new file mode 100644 index 000000000000..9c0df4ce5451 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go @@ -0,0 +1,61 @@ +package certrotation + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // ManagedCertificateTypeLabelName marks config map or secret as object that contains managed certificates. + // This groups all objects that store certs and allow easy query to get them all. + // The value of this label should be set to "true". + ManagedCertificateTypeLabelName = "auth.openshift.io/managed-certificate-type" +) + +type CertificateType string + +var ( + CertificateTypeCABundle CertificateType = "ca-bundle" + CertificateTypeSigner CertificateType = "signer" + CertificateTypeTarget CertificateType = "target" + CertificateTypeUnknown CertificateType = "unknown" +) + +// LabelAsManagedConfigMap add label indicating the given config map contains certificates +// that are managed. +func LabelAsManagedConfigMap(config *v1.ConfigMap, certificateType CertificateType) { + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[ManagedCertificateTypeLabelName] = string(certificateType) +} + +// LabelAsManagedConfigMap add label indicating the given secret contains certificates +// that are managed. +func LabelAsManagedSecret(secret *v1.Secret, certificateType CertificateType) { + if secret.Labels == nil { + secret.Labels = map[string]string{} + } + secret.Labels[ManagedCertificateTypeLabelName] = string(certificateType) +} + +// CertificateTypeFromObject returns the CertificateType based on the annotations of the object. 
+func CertificateTypeFromObject(obj runtime.Object) (CertificateType, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + actualLabels := accessor.GetLabels() + if actualLabels == nil { + return CertificateTypeUnknown, nil + } + + t := CertificateType(actualLabels[ManagedCertificateTypeLabelName]) + switch t { + case CertificateTypeCABundle, CertificateTypeSigner, CertificateTypeTarget: + return t, nil + default: + return CertificateTypeUnknown, nil + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go index 25ade71468a9..56ce2a3307a5 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go @@ -48,6 +48,8 @@ func (c SigningRotation) ensureSigningCertKeyPair() (*crypto.CA, error) { return nil, err } + LabelAsManagedSecret(signingCertKeyPairSecret, CertificateTypeSigner) + actualSigningCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, signingCertKeyPairSecret) if err != nil { return nil, err diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go index c1cba2b316ff..209a07539341 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go @@ -43,6 +43,9 @@ func TestEnsureSigningCertKeyPair(t *testing.T) { } actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.Secret) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeSigner { + t.Errorf("expected certificate type 'signer', got: %v", certType) + } if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { t.Error(actual.Data) } @@ 
-65,6 +68,9 @@ func TestEnsureSigningCertKeyPair(t *testing.T) { } actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.Secret) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeSigner { + t.Errorf("expected certificate type 'signer', got: %v", certType) + } if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { t.Error(actual.Data) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go index 9c389a131b01..48d6efd8d5be 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go @@ -70,6 +70,8 @@ func (c TargetRotation) ensureTargetCertKeyPair(signingCertKeyPair *crypto.CA, c return err } + LabelAsManagedSecret(targetCertKeyPairSecret, CertificateTypeTarget) + actualTargetCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, targetCertKeyPairSecret) if err != nil { return err diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go index 133b1242fb9e..448d8f793754 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go @@ -303,6 +303,10 @@ func TestEnsureTargetSignerCertKeyPair(t *testing.T) { t.Error(actual.Data) } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeTarget { + t.Errorf("expected certificate type 'target', got: %v", certType) + } + signingCertKeyPair, err := crypto.GetCAFromBytes(actual.Data["tls.crt"], actual.Data["tls.key"]) if err != nil { t.Error(actual.Data) @@ -342,6 +346,9 @@ func TestEnsureTargetSignerCertKeyPair(t *testing.T) { if len(actual.Data["tls.crt"]) == 0 
|| len(actual.Data["tls.key"]) == 0 { t.Error(actual.Data) } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeTarget { + t.Errorf("expected certificate type 'target', got: %v", certType) + } signingCertKeyPair, err := crypto.GetCAFromBytes(actual.Data["tls.crt"], actual.Data["tls.key"]) if err != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go new file mode 100644 index 000000000000..65846c975a98 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go @@ -0,0 +1,157 @@ +package cloudprovider + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" +) + +const ( + cloudProviderConfFilePath = "/etc/kubernetes/static-pod-resources/configmaps/cloud-config/%s" + configNamespace = "openshift-config" +) + +// InfrastructureLister lists infrastrucre information and allows resources to be synced +type InfrastructureLister interface { + InfrastructureLister() configlistersv1.InfrastructureLister + ResourceSyncer() resourcesynccontroller.ResourceSyncer +} + +// NewCloudProviderObserver returns a new cloudprovider observer for syncing cloud provider specific +// information to controller-manager and api-server. 
+func NewCloudProviderObserver(targetNamespaceName string, cloudProviderNamePath, cloudProviderConfigPath []string) configobserver.ObserveConfigFunc { + cloudObserver := &cloudProviderObserver{ + targetNamespaceName: targetNamespaceName, + cloudProviderNamePath: cloudProviderNamePath, + cloudProviderConfigPath: cloudProviderConfigPath, + } + return cloudObserver.ObserveCloudProviderNames +} + +type cloudProviderObserver struct { + targetNamespaceName string + cloudProviderNamePath []string + cloudProviderConfigPath []string +} + +// ObserveCloudProviderNames observes the cloud provider from the global cluster infrastructure resource. +func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(InfrastructureLister) + var errs []error + cloudProvidersPath := c.cloudProviderNamePath + cloudProviderConfPath := c.cloudProviderConfigPath + previouslyObservedConfig := map[string]interface{}{} + + existingCloudConfig, _, err := unstructured.NestedStringSlice(existingConfig, cloudProviderConfPath...) 
+ if err != nil { + return previouslyObservedConfig, append(errs, err) + } + + if currentCloudProvider, _, _ := unstructured.NestedStringSlice(existingConfig, cloudProvidersPath...); len(currentCloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, currentCloudProvider, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + if len(existingCloudConfig) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, existingCloudConfig, cloudProviderConfPath...); err != nil { + errs = append(errs, err) + } + } + + observedConfig := map[string]interface{}{} + + infrastructure, err := listers.InfrastructureLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveCloudProviderNames", "Required infrastructures.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + return previouslyObservedConfig, errs + } + + cloudProvider := getPlatformName(infrastructure.Status.Platform, recorder) + if len(cloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(observedConfig, []string{cloudProvider}, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + sourceCloudConfigMap := infrastructure.Spec.CloudConfig.Name + sourceCloudConfigNamespace := configNamespace + sourceLocation := resourcesynccontroller.ResourceLocation{ + Namespace: sourceCloudConfigNamespace, + Name: sourceCloudConfigMap, + } + // we set cloudprovider configmap values only for vsphere. 
+ if cloudProvider != "vsphere" { + sourceCloudConfigMap = "" + } + + if len(sourceCloudConfigMap) == 0 { + sourceLocation = resourcesynccontroller.ResourceLocation{} + } + + err = listers.ResourceSyncer().SyncConfigMap( + resourcesynccontroller.ResourceLocation{ + Namespace: c.targetNamespaceName, + Name: "cloud-config", + }, + sourceLocation) + + if err != nil { + errs = append(errs, err) + return observedConfig, errs + } + + if len(sourceCloudConfigMap) == 0 { + return observedConfig, errs + } + + // usually key will be simply config but we should refer it just in case + staticCloudConfFile := fmt.Sprintf(cloudProviderConfFilePath, infrastructure.Spec.CloudConfig.Key) + + if err := unstructured.SetNestedStringSlice(observedConfig, []string{staticCloudConfFile}, cloudProviderConfPath...); err != nil { + recorder.Warningf("ObserveCloudProviderNames", "Failed setting cloud-config : %v", err) + errs = append(errs, err) + } + + if !equality.Semantic.DeepEqual(existingCloudConfig, []string{staticCloudConfFile}) { + recorder.Eventf("ObserveCloudProviderNamesChanges", "CloudProvider config file changed to %s", staticCloudConfFile) + } + + return observedConfig, errs +} + +func getPlatformName(platformType configv1.PlatformType, recorder events.Recorder) string { + cloudProvider := "" + switch platformType { + case "": + recorder.Warningf("ObserveCloudProvidersFailed", "Required status.platform field is not set in infrastructures.%s/cluster", configv1.GroupName) + case configv1.AWSPlatformType: + cloudProvider = "aws" + case configv1.AzurePlatformType: + cloudProvider = "azure" + case configv1.VSpherePlatformType: + cloudProvider = "vsphere" + case configv1.LibvirtPlatformType: + case configv1.OpenStackPlatformType: + // TODO(flaper87): Enable this once we've figured out a way to write the cloud provider config in the master nodes + //cloudProvider = "openstack" + case configv1.NonePlatformType: + default: + // the new doc on the infrastructure fields requires that we 
treat an unrecognized thing the same bare metal. + // TODO find a way to indicate to the user that we didn't honor their choice + recorder.Warningf("ObserveCloudProvidersFailed", fmt.Sprintf("No recognized cloud provider platform found in infrastructures.%s/cluster.status.platform", configv1.GroupName)) + } + return cloudProvider +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go new file mode 100644 index 000000000000..1d801515ffe4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go @@ -0,0 +1,105 @@ +package cloudprovider + +import ( + "testing" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type FakeResourceSyncer struct{} + +func (fakeSyncer *FakeResourceSyncer) SyncConfigMap(destination, source resourcesynccontroller.ResourceLocation) error { + return nil +} + +func (fakeSyncer *FakeResourceSyncer) SyncSecret(destination, source resourcesynccontroller.ResourceLocation) error { + return nil +} + +type FakeInfrastructureLister struct { + InfrastructureLister_ configlistersv1.InfrastructureLister + ResourceSync resourcesynccontroller.ResourceSyncer + PreRunCachesSynced []cache.InformerSynced +} + +func (l FakeInfrastructureLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return l.ResourceSync +} + +func (l FakeInfrastructureLister) InfrastructureLister() configlistersv1.InfrastructureLister { + return l.InfrastructureLister_ +} + +func (l 
FakeInfrastructureLister) PreRunHasSynced() []cache.InformerSynced { + return l.PreRunCachesSynced +} + +func TestObserveCloudProviderNames(t *testing.T) { + cases := []struct { + platform configv1.PlatformType + expected string + cloudProviderCount int + }{{ + platform: configv1.AWSPlatformType, + expected: "aws", + cloudProviderCount: 1, + }, { + platform: configv1.AzurePlatformType, + expected: "azure", + cloudProviderCount: 1, + }, { + platform: configv1.LibvirtPlatformType, + cloudProviderCount: 0, + }, { + platform: configv1.OpenStackPlatformType, + cloudProviderCount: 0, + }, { + platform: configv1.GCPPlatformType, + cloudProviderCount: 0, + }, { + platform: configv1.NonePlatformType, + cloudProviderCount: 0, + }, { + platform: "", + cloudProviderCount: 0, + }} + for _, c := range cases { + t.Run(string(c.platform), func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if err := indexer.Add(&configv1.Infrastructure{ObjectMeta: v1.ObjectMeta{Name: "cluster"}, Status: configv1.InfrastructureStatus{Platform: c.platform}}); err != nil { + t.Fatal(err.Error()) + } + listers := FakeInfrastructureLister{ + InfrastructureLister_: configlistersv1.NewInfrastructureLister(indexer), + ResourceSync: &FakeResourceSyncer{}, + } + cloudProvidersPath := []string{"extendedArguments", "cloud-provider"} + cloudProviderConfPath := []string{"extendedArguments", "cloud-config"} + observerFunc := NewCloudProviderObserver("kube-controller-manager", cloudProvidersPath, cloudProviderConfPath) + result, errs := observerFunc(listers, events.NewInMemoryRecorder("cloud"), map[string]interface{}{}) + if len(errs) > 0 { + t.Fatal(errs) + } + cloudProvider, _, err := unstructured.NestedSlice(result, "extendedArguments", "cloud-provider") + if err != nil { + t.Fatal(err) + } + if e, a := c.cloudProviderCount, len(cloudProvider); e != a { + t.Fatalf("expected len(cloudProvider) == %d, got %d", e, a) + } + if c.cloudProviderCount > 0 { + if e, a := 
c.expected, cloudProvider[0]; e != a { + t.Errorf("expected cloud-provider=%s, got %s", e, a) + } + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go index e5badf2aec47..2c566647fdb6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -24,7 +24,7 @@ import ( "github.com/openshift/library-go/pkg/operator/v1helpers" ) -const operatorStatusTypeConfigObservationFailing = "ConfigObservationFailing" +const operatorStatusTypeConfigObservationDegraded = "ConfigObservationDegraded" const configObserverWorkKey = "key" // Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type @@ -41,18 +41,17 @@ type Listers interface { type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) type ConfigObserver struct { - operatorConfigClient v1helpers.OperatorClient - eventRecorder events.Recorder - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface // observers are called in an undefined order and their results are merged to // determine the observed configuration. 
observers []ObserveConfigFunc + operatorClient v1helpers.OperatorClient // listers are used by config observers to retrieve necessary resources listers Listers + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } func NewConfigObserver( @@ -62,8 +61,8 @@ func NewConfigObserver( observers ...ObserveConfigFunc, ) *ConfigObserver { return &ConfigObserver{ - operatorConfigClient: operatorClient, - eventRecorder: eventRecorder.WithComponentSuffix("config-observer"), + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("config-observer"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ConfigObserver"), @@ -75,7 +74,7 @@ func NewConfigObserver( // sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This // must be information that is logically "owned" by another component. func (c ConfigObserver) sync() error { - originalSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + originalSpec, _, _, err := c.operatorClient.GetOperatorState() if err != nil { return err } @@ -116,7 +115,7 @@ func (c ConfigObserver) sync() error { if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) { c.eventRecorder.Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig)) - if _, _, err := v1helpers.UpdateSpec(c.operatorConfigClient, v1helpers.UpdateObservedConfigFn(mergedObservedConfig)); err != nil { + if _, _, err := v1helpers.UpdateSpec(c.operatorClient, v1helpers.UpdateObservedConfigFn(mergedObservedConfig)); err != nil { // At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers // but instead reset the errors and only report single error condition. 
errs = []error{fmt.Errorf("error writing updated observed config: %v", err)} @@ -127,7 +126,7 @@ func (c ConfigObserver) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationFailing, + Type: operatorStatusTypeConfigObservationDegraded, Status: operatorv1.ConditionFalse, } if configError != nil { @@ -135,7 +134,7 @@ func (c ConfigObserver) sync() error { cond.Reason = "Error" cond.Message = configError.Error() } - if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { return updateError } @@ -148,7 +147,6 @@ func (c *ConfigObserver) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting ConfigObserver") defer klog.Infof("Shutting down ConfigObserver") - if !cache.WaitForCacheSync(stopCh, c.listers.PreRunHasSynced()...) 
{ utilruntime.HandleError(fmt.Errorf("caches did not sync")) return diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go index f84a3e5b2444..0b28510a869c 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go @@ -103,7 +103,7 @@ func TestSyncStatus(t *testing.T) { "baz": "three", }}, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationFailing, + Type: operatorStatusTypeConfigObservationDegraded, Status: operatorv1.ConditionFalse, }, }, @@ -136,7 +136,7 @@ func TestSyncStatus(t *testing.T) { "bar": "two", }}, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationFailing, + Type: operatorStatusTypeConfigObservationDegraded, Status: operatorv1.ConditionTrue, Reason: "Error", Message: "some failure", @@ -163,7 +163,7 @@ func TestSyncStatus(t *testing.T) { expectError: true, expectedObservedConfig: nil, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationFailing, + Type: operatorStatusTypeConfigObservationDegraded, Status: operatorv1.ConditionTrue, Reason: "Error", Message: "error writing updated observed config: update spec failure", @@ -190,7 +190,7 @@ func TestSyncStatus(t *testing.T) { expectError: true, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationFailing, + Type: operatorStatusTypeConfigObservationDegraded, Status: operatorv1.ConditionTrue, Reason: "Error", Message: "non-deterministic config observation detected", @@ -204,10 +204,10 @@ func TestSyncStatus(t *testing.T) { eventClient := fake.NewSimpleClientset() configObserver := ConfigObserver{ - listers: &fakeLister{}, 
- operatorConfigClient: operatorConfigClient, - observers: tc.observers, - eventRecorder: events.NewRecorder(eventClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}), + listers: &fakeLister{}, + operatorClient: operatorConfigClient, + observers: tc.observers, + eventRecorder: events.NewRecorder(eventClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}), } err := configObserver.sync() if tc.expectError && err == nil { @@ -250,7 +250,7 @@ func TestSyncStatus(t *testing.T) { case tc.expectedCondition != nil && operatorConfigClient.status == nil: t.Error("missing expected status") case tc.expectedCondition != nil: - condition := v1helpers.FindOperatorCondition(operatorConfigClient.status.Conditions, operatorStatusTypeConfigObservationFailing) + condition := v1helpers.FindOperatorCondition(operatorConfigClient.status.Conditions, operatorStatusTypeConfigObservationDegraded) condition.LastTransitionTime = tc.expectedCondition.LastTransitionTime if !reflect.DeepEqual(tc.expectedCondition, condition) { t.Fatalf("\n===== condition expected:\n%v\n===== condition actual:\n%v", toYAML(tc.expectedCondition), toYAML(condition)) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go new file mode 100644 index 000000000000..dd27886694a0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -0,0 +1,97 @@ +package featuregates + +import ( + "fmt" + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + 
"github.com/openshift/library-go/pkg/operator/events" +) + +type FeatureGateLister interface { + FeatureGateLister() configlistersv1.FeatureGateLister +} + +func NewObserveFeatureFlagsFunc(knownFeatures sets.String, configPath []string) configobserver.ObserveConfigFunc { + return (&featureFlags{ + allowAll: len(knownFeatures) == 0, + knownFeatures: knownFeatures, + configPath: configPath, + }).ObserveFeatureFlags +} + +type featureFlags struct { + allowAll bool + knownFeatures sets.String + configPath []string +} + +// ObserveFeatureFlags fills in --feature-flags for the kube-apiserver +func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(FeatureGateLister) + errs := []error{} + prevObservedConfig := map[string]interface{}{} + + currentConfigValue, _, err := unstructured.NestedStringSlice(existingConfig, f.configPath...) + if err != nil { + errs = append(errs, err) + } + if len(currentConfigValue) > 0 { + if err := unstructured.SetNestedStringSlice(prevObservedConfig, currentConfigValue, f.configPath...); err != nil { + errs = append(errs, err) + } + } + + observedConfig := map[string]interface{}{} + configResource, err := listers.FeatureGateLister().Get("cluster") + // if we have no featuregate, then the installer and MCO probably still have way to reconcile certain custom resources + // we will assume that this means the same as default and hope for the best + if apierrors.IsNotFound(err) { + configResource = &configv1.FeatureGate{ + Spec: configv1.FeatureGateSpec{ + FeatureSet: configv1.Default, + }, + } + } else if err != nil { + errs = append(errs, err) + return prevObservedConfig, errs + } + + var newConfigValue []string + if featureSet, ok := configv1.FeatureSets[configResource.Spec.FeatureSet]; ok { + for _, enable := range featureSet.Enabled { + // only add whitelisted feature flags + if !f.allowAll 
&& !f.knownFeatures.Has(enable) { + continue + } + newConfigValue = append(newConfigValue, enable+"=true") + } + for _, disable := range featureSet.Disabled { + // only add whitelisted feature flags + if !f.allowAll && !f.knownFeatures.Has(disable) { + continue + } + newConfigValue = append(newConfigValue, disable+"=false") + } + } else { + errs = append(errs, fmt.Errorf(".spec.featureSet %q not found", featureSet)) + return prevObservedConfig, errs + } + if !reflect.DeepEqual(currentConfigValue, newConfigValue) { + recorder.Eventf("ObserveFeatureFlagsUpdated", "Updated %v to %s", strings.Join(f.configPath, "."), strings.Join(newConfigValue, ",")) + } + + if err := unstructured.SetNestedStringSlice(observedConfig, newConfigValue, f.configPath...); err != nil { + recorder.Warningf("ObserveFeatureFlags", "Failed setting %v: %v", strings.Join(f.configPath, "."), err) + errs = append(errs, err) + } + + return observedConfig, errs +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go new file mode 100644 index 000000000000..371550ca364b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go @@ -0,0 +1,97 @@ +package featuregates + +import ( + "reflect" + "testing" + + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" +) + +type testLister struct { + lister configlistersv1.FeatureGateLister +} + +func (l testLister) FeatureGateLister() configlistersv1.FeatureGateLister { + return 
l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} + +func TestObserveFeatureFlags(t *testing.T) { + configPath := []string{"foo", "bar"} + + tests := []struct { + name string + + configValue configv1.FeatureSet + expectedResult []string + }{ + { + name: "default", + configValue: configv1.Default, + expectedResult: []string{ + "ExperimentalCriticalPodAnnotation=true", + "RotateKubeletServerCertificate=true", + "SupportPodPidsLimit=true", + "LocalStorageCapacityIsolation=false", + }, + }, + { + name: "techpreview", + configValue: configv1.TechPreviewNoUpgrade, + expectedResult: []string{ + "ExperimentalCriticalPodAnnotation=true", + "RotateKubeletServerCertificate=true", + "SupportPodPidsLimit=true", + "CSIBlockVolume=true", + "LocalStorageCapacityIsolation=false", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(&configv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: configv1.FeatureGateSpec{ + FeatureSet: tc.configValue, + }, + }) + listers := testLister{ + lister: configlistersv1.NewFeatureGateLister(indexer), + } + eventRecorder := events.NewInMemoryRecorder("") + + initialExistingConfig := map[string]interface{}{} + + observeFn := NewObserveFeatureFlagsFunc(nil, configPath) + + observed, errs := observeFn(listers, eventRecorder, initialExistingConfig) + if len(errs) != 0 { + t.Fatal(errs) + } + actual, _, err := unstructured.NestedStringSlice(observed, configPath...) 
+ if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !reflect.DeepEqual(tc.expectedResult, actual) { + t.Errorf("%v", actual) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go index 8bed7bab6cd1..03bceede8f0b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -44,6 +44,8 @@ var podNameEnvFunc = func() string { // GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs. // The pod name must be provided via the POD_NAME name. +// Even if this method returns an error, it always return valid reference to the namespace. It allows the callers to control the logging +// and decide to fail or accept the namespace. func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) { if reference == nil { // Try to get the pod name via POD_NAME environment variable @@ -54,7 +56,10 @@ func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetName // If that fails, lets try to guess the pod by listing all pods in namespaces and using the first pod in the list reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace)) if err != nil { - return nil, err + // If this fails, do not give up with error but instead use the namespace as controller reference for the pod + // NOTE: This is last resort, if we see this often it might indicate something is wrong in the cluster. + // In some cases this might help with flakes. 
+ return getControllerReferenceForNamespace(targetNamespace), err } return GetControllerReferenceForCurrentPod(client, targetNamespace, reference) } @@ -63,7 +68,7 @@ func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetName case "Pod": pod, err := client.CoreV1().Pods(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return getControllerReferenceForNamespace(reference.Namespace), err } if podController := metav1.GetControllerOf(pod); podController != nil { return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace)) @@ -73,7 +78,7 @@ func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetName case "ReplicaSet": rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) if err != nil { - return nil, err + return getControllerReferenceForNamespace(reference.Namespace), err } if rsController := metav1.GetControllerOf(rs); rsController != nil { return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace)) @@ -85,6 +90,16 @@ func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetName } } +// getControllerReferenceForNamespace returns an object reference to the given namespace. 
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + // makeObjectReference makes object reference from ownerReference and target namespace func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { return &corev1.ObjectReference{ diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go index b3fe15ffda81..29af896d3971 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go @@ -163,3 +163,24 @@ func TestGetControllerReferenceForCurrentPod(t *testing.T) { t.Errorf("expected objectReference to be Deployment, got %q", objectReference.GroupVersionKind().String()) } } + +func TestGetControllerReferenceForCurrentPodFallbackNamespace(t *testing.T) { + client := fake.NewSimpleClientset() + + podNameEnvFunc = func() string { + return "test" + } + + objectReference, err := GetControllerReferenceForCurrentPod(client, "test", nil) + if err == nil { + t.Fatalf("expected error: %v", err) + } + + if objectReference.Name != "test" { + t.Errorf("expected objectReference name to be 'test', got %q", objectReference.Name) + } + + if objectReference.GroupVersionKind().String() != "/v1, Kind=Namespace" { + t.Errorf("expected objectReference to be Namespace, got %q", objectReference.GroupVersionKind().String()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go new file mode 100644 index 000000000000..fbfe7e33f011 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go @@ -0,0 +1,121 @@ +package loglevel + +import ( + "fmt" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +type LogLevelController struct { + operatorClient operatorv1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// sets the klog level based on desired state +func NewClusterOperatorLoggingController( + operatorClient operatorv1helpers.OperatorClient, + recorder events.Recorder, +) *LogLevelController { + c := &LogLevelController{ + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("loglevel-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "LoggingSyncer"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. 
+func (c LogLevelController) sync() error { + detailedSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + logLevel := fmt.Sprintf("%d", LogLevelToKlog(detailedSpec.OperatorLogLevel)) + + var level klog.Level + + oldLevel, ok := level.Get().(klog.Level) + if !ok { + oldLevel = level + } + + if err := level.Set(logLevel); err != nil { + c.eventRecorder.Warningf("LoglevelChangeFailed", "Unable to set loglevel level %v", err) + return err + } + + if oldLevel.String() != logLevel { + c.eventRecorder.Eventf("LoglevelChange", "Changed loglevel level to %q", logLevel) + } + return nil +} + +func (c *LogLevelController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting LogLevelController") + defer klog.Infof("Shutting down LogLevelController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *LogLevelController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *LogLevelController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and loglevel +func (c *LogLevelController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go 
b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go new file mode 100644 index 000000000000..d6b94279768d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go @@ -0,0 +1,18 @@ +package loglevel + +import operatorv1 "github.com/openshift/api/operator/v1" + +func LogLevelToKlog(logLevel operatorv1.LogLevel) int { + switch logLevel { + case operatorv1.Normal: + return 2 + case operatorv1.Debug: + return 4 + case operatorv1.Trace: + return 6 + case operatorv1.TraceAll: + return 8 + default: + return 2 + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go index f62395ad6c8e..71e4f4cf7a1d 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go @@ -27,27 +27,28 @@ var workQueueKey = "instance" type ManagementStateController struct { operatorName string operatorClient operatorv1helpers.OperatorClient - eventRecorder events.Recorder - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } func NewOperatorManagementStateController( name string, - operatorStatusProvider operatorv1helpers.OperatorClient, + operatorClient operatorv1helpers.OperatorClient, recorder events.Recorder, ) *ManagementStateController { c := &ManagementStateController{ operatorName: name, - operatorClient: operatorStatusProvider, + operatorClient: operatorClient, eventRecorder: recorder, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ManagementStateController-"+name), } - 
operatorStatusProvider.Informer().AddEventHandler(c.eventHandler()) - // TODO watch clusterOperator.status changes when it moves to openshift/api + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) return c } @@ -60,7 +61,7 @@ func (c ManagementStateController) sync() error { } cond := operatorv1.OperatorCondition{ - Type: "ManagementStateFailing", + Type: "ManagementStateDegraded", Status: operatorv1.ConditionFalse, } @@ -97,6 +98,9 @@ func (c *ManagementStateController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting management-state-controller-" + c.operatorName) defer klog.Infof("Shutting down management-state-controller-" + c.operatorName) + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go index 900b8ea508b0..48d3ce6264e7 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go @@ -83,12 +83,12 @@ func TestOperatorManagementStateController(t *testing.T) { _, result, _, _ := statusClient.GetOperatorState() - if tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateFailing" && result.Conditions[0].Status == operatorv1.ConditionFalse { + if tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateDegraded" && result.Conditions[0].Status == operatorv1.ConditionFalse { t.Errorf("expected failing conditions") return } - if !tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateFailing" && result.Conditions[0].Status != 
operatorv1.ConditionFalse { + if !tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateDegraded" && result.Conditions[0].Status != operatorv1.ConditionFalse { t.Errorf("unexpected failing conditions: %#v", result.Conditions) return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index 82a102ba17c8..870b7ceb6422 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -230,6 +230,9 @@ func SyncConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorde switch { case apierrors.IsNotFound(err): deleteErr := client.ConfigMaps(targetNamespace).Delete(targetName, nil) + if _, getErr := client.ConfigMaps(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) { + return nil, true, nil + } if apierrors.IsNotFound(deleteErr) { return nil, false, nil } @@ -253,6 +256,9 @@ func SyncSecret(client coreclientv1.SecretsGetter, recorder events.Recorder, sou source, err := client.Secrets(sourceNamespace).Get(sourceName, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): + if _, getErr := client.Secrets(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) { + return nil, true, nil + } deleteErr := client.Secrets(targetNamespace).Delete(targetName, nil) if apierrors.IsNotFound(deleteErr) { return nil, false, nil diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go index 09d84a1c7abc..f5a26338b738 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go @@ -8,15 +8,13 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/util/cert" "github.com/openshift/library-go/pkg/crypto" - "github.com/openshift/library-go/pkg/operator/events" ) -func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, client corev1client.ConfigMapsGetter, recorder events.Recorder, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { +func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { certificates := []*x509.Certificate{} for _, input := range inputConfigMaps { inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go index aa2f5e67b360..4d4cc4f127b0 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -28,8 +28,8 @@ import ( ) const ( - operatorStatusResourceSyncControllerFailing = "ResourceSyncControllerFailing" - controllerWorkQueueKey = "key" + operatorStatusResourceSyncControllerDegraded = "ResourceSyncControllerDegraded" + controllerWorkQueueKey = "key" ) // ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. 
@@ -45,16 +45,14 @@ type ResourceSyncController struct { // knownNamespaces is the list of namespaces we are watching. knownNamespaces sets.String - preRunCachesSynced []cache.InformerSynced - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface - configMapGetter corev1client.ConfigMapsGetter secretGetter corev1client.SecretsGetter kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces operatorConfigClient v1helpers.OperatorClient - eventRecorder events.Recorder + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } var _ ResourceSyncer = &ResourceSyncController{} @@ -88,13 +86,16 @@ func NewResourceSyncController( informers := kubeInformersForNamespaces.InformersFor(namespace) informers.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) informers.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) - c.preRunCachesSynced = append(c.preRunCachesSynced, informers.Core().V1().ConfigMaps().Informer().HasSynced) - c.preRunCachesSynced = append(c.preRunCachesSynced, informers.Core().V1().Secrets().Informer().HasSynced) + + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().Secrets().Informer().HasSynced) } // we watch this just in case someone messes with our status operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorConfigClient.Informer().HasSynced) + return c } @@ -150,7 +151,10 @@ func (c *ResourceSyncController) sync() error { for destination, source := range c.configMapSyncRules { if source == emptyResourceLocation { // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call. 
- if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { + if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } continue } if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { @@ -167,7 +171,10 @@ func (c *ResourceSyncController) sync() error { for destination, source := range c.secretSyncRules { if source == emptyResourceLocation { // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call. - if _, err := c.secretGetter.Secrets(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { + if _, err := c.secretGetter.Secrets(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } continue } if err := c.secretGetter.Secrets(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { @@ -184,7 +191,7 @@ func (c *ResourceSyncController) sync() error { if len(errors) > 0 { cond := operatorv1.OperatorCondition{ - Type: operatorStatusResourceSyncControllerFailing, + Type: operatorStatusResourceSyncControllerDegraded, Status: operatorv1.ConditionTrue, Reason: "Error", Message: v1helpers.NewMultiLineAggregate(errors).Error(), @@ -196,7 +203,7 @@ func (c *ResourceSyncController) sync() error { } cond := operatorv1.OperatorCondition{ - Type: operatorStatusResourceSyncControllerFailing, + Type: operatorStatusResourceSyncControllerDegraded, Status: operatorv1.ConditionFalse, } if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { @@ -211,7 +218,7 @@ func (c 
*ResourceSyncController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting ResourceSyncController") defer klog.Infof("Shutting down ResourceSyncController") - if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go index 6920f3246486..6a7b081f2bab 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go @@ -3,9 +3,15 @@ package resourcesynccontroller import ( "net/http" "net/http/httptest" + "sync" "testing" "time" + "k8s.io/apimachinery/pkg/runtime" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" "github.com/openshift/library-go/pkg/operator/v1helpers" corev1 "k8s.io/api/core/v1" @@ -15,9 +21,144 @@ import ( "k8s.io/client-go/kubernetes/fake" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" ) +func TestSyncSecret(t *testing.T) { + kubeClient := fake.NewSimpleClientset( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "config", Name: "foo"}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "operator", Name: "to-remove"}, + }, + ) + + destinationSecretCreated := make(chan struct{}) + destinationSecretBarChecked := make(chan struct{}) + destinationSecretEmptySourceChecked := make(chan struct{}) + + kubeClient.PrependReactor("create", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + actual, isCreate := action.(ktesting.CreateAction) + if !isCreate { + return false, nil, nil + } + 
secret, isSecret := actual.GetObject().(*corev1.Secret) + if !isSecret { + return false, nil, nil + } + if secret.Name == "foo" && secret.Namespace == "operator" { + close(destinationSecretCreated) + } + return false, nil, nil + }) + + deleteSecretCounterMutex := sync.Mutex{} + deleteSecretCounter := 0 + + kubeClient.PrependReactor("delete", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + deleteSecretCounterMutex.Lock() + defer deleteSecretCounterMutex.Unlock() + deleteSecretCounter++ + return false, nil, nil + }) + + kubeClient.PrependReactor("get", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + actual, isGet := action.(ktesting.GetAction) + if !isGet { + return false, nil, nil + } + if actual.GetNamespace() == "operator" { + switch actual.GetName() { + case "bar": + close(destinationSecretBarChecked) + case "empty-source": + close(destinationSecretEmptySourceChecked) + } + } + return false, nil, nil + }) + + secretInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("config")) + operatorInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("operator")) + fakeStaticPodOperatorClient := v1helpers.NewFakeOperatorClient( + &operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + &operatorv1.OperatorStatus{}, + nil, + ) + eventRecorder := eventstesting.NewTestingEventRecorder(t) + c := NewResourceSyncController( + fakeStaticPodOperatorClient, + v1helpers.NewFakeKubeInformersForNamespaces(map[string]informers.SharedInformerFactory{ + "config": secretInformers, + "operator": operatorInformers, + }), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.cachesToSync = []cache.InformerSynced{ + secretInformers.Core().V1().Secrets().Informer().HasSynced, + } + c.configMapGetter = kubeClient.CoreV1() + c.secretGetter = kubeClient.CoreV1() + + stopCh := make(chan struct{}) + defer 
close(stopCh) + + go secretInformers.Start(stopCh) + go c.Run(1, stopCh) + + // The source secret was removed (404) but the destination exists. This should increase the "deleteSecretCounter" + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "to-remove"}, ResourceLocation{Namespace: "config", Name: "removed"}); err != nil { + t.Fatal(err) + } + + // The source secret exists, but the destination does not. This should close the "destinationSecretCreated" channel + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "foo"}, ResourceLocation{Namespace: "config", Name: "foo"}); err != nil { + t.Fatal(err) + } + + // Neither the source secret nor the destination secret exists. This should close the "destinationSecretBarChecked" and should not increase + // the deleteSecretCounter (we don't issue a Delete() call when Get() returns 404) + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "bar"}, ResourceLocation{Namespace: "config", Name: "bar"}); err != nil { + t.Fatal(err) + } + + // The source resource location is not set and the destination does not exist. This should close the "destinationSecretEmptySourceChecked" and + // should not increase the deleteSecretCounter (this is a special case in the resource sync controller).
+ if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "empty-source"}, ResourceLocation{}); err != nil { + t.Fatal(err) + } + + select { + case <-destinationSecretCreated: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret to be created") + } + + select { + case <-destinationSecretBarChecked: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret 'bar' to be checked for existence") + } + + select { + case <-destinationSecretEmptySourceChecked: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret 'empty-source' to be checked for existence") + } + + deleteSecretCounterMutex.Lock() + defer deleteSecretCounterMutex.Unlock() + if deleteSecretCounter != 1 { + t.Fatalf("expected exactly 1 delete call for this test, got %d", deleteSecretCounter) + } +} + func TestSyncConfigMap(t *testing.T) { kubeClient := fake.NewSimpleClientset( &corev1.ConfigMap{ diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go index 1d72ee9e2ed1..9c55363dfe77 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go @@ -27,59 +27,55 @@ import ( ) const ( - operatorStatusBackingResourceControllerFailing = "BackingResourceControllerFailing" - controllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/backingresource" + operatorStatusBackingResourceControllerDegraded = "BackingResourceControllerDegraded" + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/backingresource" ) // BackingResourceController is a 
controller that watches the operator config and updates // service accounts and RBAC rules in the target namespace according to the bindata manifests // (templated with the config) if they differ. type BackingResourceController struct { - targetNamespace string - operatorConfigClient v1helpers.OperatorClient + targetNamespace string - saListerSynced cache.InformerSynced - saLister corelisterv1.ServiceAccountLister + operatorClient v1helpers.OperatorClient + saLister corelisterv1.ServiceAccountLister + clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister + kubeClient kubernetes.Interface - clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister - clusterRoleBindingListerSynced cache.InformerSynced - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface - - kubeClient kubernetes.Interface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface eventRecorder events.Recorder } // NewBackingResourceController creates a new backing resource controller. 
func NewBackingResourceController( targetNamespace string, - operatorConfigClient v1helpers.OperatorClient, + operatorClient v1helpers.OperatorClient, kubeInformersForTargetNamespace informers.SharedInformerFactory, kubeClient kubernetes.Interface, eventRecorder events.Recorder, ) *BackingResourceController { c := &BackingResourceController{ - targetNamespace: targetNamespace, - operatorConfigClient: operatorConfigClient, - eventRecorder: eventRecorder.WithComponentSuffix("backing-resource-controller"), + targetNamespace: targetNamespace, + operatorClient: operatorClient, - saListerSynced: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced, - saLister: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Lister(), + saLister: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Lister(), + clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), + kubeClient: kubeClient, - clusterRoleBindingListerSynced: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced, - clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), - - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "BackingResourceController"), - kubeClient: kubeClient, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "BackingResourceController"), + eventRecorder: eventRecorder.WithComponentSuffix("backing-resource-controller"), } - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) - + operatorClient.Informer().AddEventHandler(c.eventHandler()) kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().AddEventHandler(c.eventHandler()) kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = 
append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced) + return c } @@ -93,7 +89,7 @@ func (c BackingResourceController) mustTemplateAsset(name string) ([]byte, error } func (c BackingResourceController) sync() error { - operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + operatorSpec, _, _, err := c.operatorClient.GetOperatorState() if err != nil { return err } @@ -117,7 +113,7 @@ func (c BackingResourceController) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusBackingResourceControllerFailing, + Type: operatorStatusBackingResourceControllerDegraded, Status: operatorv1.ConditionFalse, } if err != nil { @@ -125,7 +121,7 @@ func (c BackingResourceController) sync() error { cond.Reason = "Error" cond.Message = err.Error() } - if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { if err == nil { return updateError } @@ -141,10 +137,7 @@ func (c *BackingResourceController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting BackingResourceController") defer klog.Infof("Shutting down BackingResourceController") - if !cache.WaitForCacheSync(stopCh, c.saListerSynced) { - return - } - if !cache.WaitForCacheSync(stopCh, c.clusterRoleBindingListerSynced) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) 
{ return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go index f539655a0796..9f08361ebca9 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go @@ -154,7 +154,7 @@ func TestBackingResourceController(t *testing.T) { ), expectSyncError: `test error`, validateStatus: func(t *testing.T, status *operatorv1.OperatorStatus) { - if status.Conditions[0].Type != operatorStatusBackingResourceControllerFailing { + if status.Conditions[0].Type != operatorStatusBackingResourceControllerDegraded { t.Errorf("expected status condition to be failing, got %v", status.Conditions[0].Type) } if status.Conditions[0].Reason != "Error" { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go index 3643e6742c27..01641be80f64 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go @@ -26,6 +26,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/loglevel" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" "github.com/openshift/library-go/pkg/operator/resource/resourceread" @@ -35,11 +36,11 @@ import ( ) const ( - 
operatorStatusInstallerControllerFailing = "InstallerControllerFailing" - nodeInstallerFailing = "NodeInstallerFailing" - installerControllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/installer" - manifestInstallerPodPath = "manifests/installer-pod.yaml" + operatorStatusInstallerControllerDegraded = "InstallerControllerDegraded" + nodeInstallerDegraded = "NodeInstallerDegraded" + installerControllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/installer" + manifestInstallerPodPath = "manifests/installer-pod.yaml" hostResourceDirDir = "/etc/kubernetes/static-pod-resources" hostPodManifestDir = "/etc/kubernetes/manifests" @@ -65,16 +66,16 @@ type InstallerController struct { certSecrets []revision.RevisionResource certDir string - operatorConfigClient v1helpers.StaticPodOperatorClient + operatorClient v1helpers.StaticPodOperatorClient configMapsGetter corev1client.ConfigMapsGetter + secretsGetter corev1client.SecretsGetter podsGetter corev1client.PodsGetter + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface eventRecorder events.Recorder - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface - // installerPodImageFn returns the image name for the installer pod installerPodImageFn func() string // ownerRefsFn sets the ownerrefs on the pruner pod @@ -117,8 +118,9 @@ func NewInstallerController( secrets []revision.RevisionResource, command []string, kubeInformersForTargetNamespace informers.SharedInformerFactory, - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, configMapsGetter corev1client.ConfigMapsGetter, + secretsGetter corev1client.SecretsGetter, podsGetter corev1client.PodsGetter, eventRecorder events.Recorder, ) *InstallerController { @@ -129,10 +131,11 @@ func NewInstallerController( secrets: secrets, command: command, - operatorConfigClient: 
operatorConfigClient, - configMapsGetter: configMapsGetter, - podsGetter: podsGetter, - eventRecorder: eventRecorder.WithComponentSuffix("installer-controller"), + operatorClient: operatorClient, + configMapsGetter: configMapsGetter, + secretsGetter: secretsGetter, + podsGetter: podsGetter, + eventRecorder: eventRecorder.WithComponentSuffix("installer-controller"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerController"), @@ -140,9 +143,13 @@ func NewInstallerController( } c.ownerRefsFn = c.setOwnerRefs - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + + operatorClient.Informer().AddEventHandler(c.eventHandler()) kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + return c } @@ -299,7 +306,7 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St // it's an extra write/read, but it makes the state debuggable from outside this process if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) - newOperatorStatus, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions) + newOperatorStatus, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions) if updateError != nil { return false, updateError } else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision { @@ -340,7 +347,7 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St // it's an extra write/read, but it makes the 
state debuggable from outside this process if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) - if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { return false, updateError } else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 { c.eventRecorder.Eventf("NodeTargetRevisionChanged", "Updating node %q from revision %d to %d", currNodeState.NodeName, @@ -485,14 +492,14 @@ func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1 failingDescription := strings.Join(failingStrings, "; ") v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ - Type: nodeInstallerFailing, + Type: nodeInstallerDegraded, Status: operatorv1.ConditionTrue, Reason: "InstallerPodFailed", Message: failingDescription, }) } else { v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ - Type: nodeInstallerFailing, + Type: nodeInstallerDegraded, Status: operatorv1.ConditionFalse, }) } @@ -624,8 +631,9 @@ func (c *InstallerController) ensureInstallerPod(nodeName string, operatorSpec * if c.configMaps[0].Optional { return fmt.Errorf("pod configmap %s is required, cannot be optional", c.configMaps[0].Name) } + args := []string{ - "-v=4", // TODO: Make this configurable? 
+ fmt.Sprintf("-v=%d", loglevel.LogLevelToKlog(operatorSpec.LogLevel)), fmt.Sprintf("--revision=%d", revision), fmt.Sprintf("--namespace=%s", pod.Namespace), fmt.Sprintf("--pod=%s", c.configMaps[0].Name), @@ -695,8 +703,47 @@ func getInstallerPodImageFromEnv() string { return os.Getenv("OPERATOR_IMAGE") } +// ensureCerts makes sure that our certs are ready or it will return an error to trigger a requeue so that we try again +func (c InstallerController) ensureCerts() error { + missing := []string{} + for _, cm := range c.certConfigMaps { + if cm.Optional { + continue + } + _, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(cm.Name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing = append(missing, "configmaps/"+cm.Name) + continue + } + return err + } + for _, s := range c.certSecrets { + if s.Optional { + continue + } + _, err := c.secretsGetter.Secrets(c.targetNamespace).Get(s.Name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing = append(missing, "secrets/"+s.Name) + continue + } + return err + } + + if len(missing) == 0 { + return nil + } + + return fmt.Errorf("missing: %v", strings.Join(missing, ",")) +} + func (c InstallerController) sync() error { - operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } @@ -706,6 +753,10 @@ func (c InstallerController) sync() error { return nil } + if err := c.ensureCerts(); err != nil { + return err + } + requeue, syncErr := c.manageInstallationPods(operatorSpec, operatorStatus, resourceVersion) if requeue && syncErr == nil { return fmt.Errorf("synthetic requeue request") @@ -714,7 +765,7 @@ func (c InstallerController) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: 
operatorStatusInstallerControllerFailing, + Type: operatorStatusInstallerControllerDegraded, Status: operatorv1.ConditionFalse, } if err != nil { @@ -722,7 +773,7 @@ func (c InstallerController) sync() error { cond.Reason = "Error" cond.Message = err.Error() } - if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { if err == nil { return updateError } @@ -738,6 +789,9 @@ func (c *InstallerController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting InstallerController") defer klog.Infof("Shutting down InstallerController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } // doesn't matter what workers say, only start one. 
go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go index 69f744d5bcae..6df3d6b33c44 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -28,23 +28,19 @@ import ( func TestNewNodeStateForInstallInProgress(t *testing.T) { kubeClient := fake.NewSimpleClientset() - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { if installerPod != nil { return true, nil, errors.NewAlreadyExists(schema.GroupResource{Resource: "pods"}, installerPod.Name) } - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) kubeClient.PrependReactor("get", "pods", getPodsReactor(installerPod)) return true, installerPod, nil }) kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -61,9 +57,10 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { }, }, nil, + nil, ) - eventRecorder := 
events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) podCommand := []string{"/bin/true", "--foo=test", "--bar"} c := NewInstallerController( "test", "test-pod", @@ -74,6 +71,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -140,7 +138,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("installer succeeded") - installerPod.Status.Phase = v1.PodSucceeded + installerPod.Status.Phase = corev1.PodSucceeded if err := c.sync(); err != nil { t.Fatal(err) @@ -152,21 +150,21 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("static pod launched, but is not ready") - staticPod := &v1.Pod{ + staticPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pod-test-node-1", Namespace: "test", Labels: map[string]string{"revision": "1"}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Status: v1.ConditionFalse, - Type: v1.PodReady, + Status: corev1.ConditionFalse, + Type: corev1.PodReady, }, }, - Phase: v1.PodRunning, + Phase: corev1.PodRunning, }, } kubeClient.PrependReactor("get", "pods", getPodsReactor(staticPod)) @@ -181,7 +179,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("static pod is ready") - staticPod.Status.Conditions[0].Status = v1.ConditionTrue + staticPod.Status.Conditions[0].Status = corev1.ConditionTrue if err := c.sync(); err != nil { t.Fatal(err) @@ -199,12 +197,12 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { fakeStaticPodOperatorClient.UpdateStaticPodOperatorStatus("1", currStatus) installerPod.Name 
= "installer-2-test-node-1" - installerPod.Status.Phase = v1.PodFailed - installerPod.Status.ContainerStatuses = []v1.ContainerStatus{ + installerPod.Status.Phase = corev1.PodFailed + installerPod.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "installer", - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{Message: "fake death"}, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{Message: "fake death"}, }, }, } @@ -233,7 +231,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } } -func getPodsReactor(pods ...*v1.Pod) ktesting.ReactionFunc { +func getPodsReactor(pods ...*corev1.Pod) ktesting.ReactionFunc { return func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { podName := action.(ktesting.GetAction).GetName() for _, p := range pods { @@ -248,18 +246,14 @@ func getPodsReactor(pods ...*v1.Pod) ktesting.ReactionFunc { func TestCreateInstallerPod(t *testing.T) { kubeClient := fake.NewSimpleClientset() - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) return false, nil, nil }) kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -276,8 +270,9 @@ func TestCreateInstallerPod(t *testing.T) { }, }, nil, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := 
events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( "test", "test-pod", @@ -288,6 +283,7 @@ func TestCreateInstallerPod(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -327,7 +323,7 @@ func TestCreateInstallerPod(t *testing.T) { } expectedArgs := []string{ - "-v=4", + "-v=2", "--revision=1", "--namespace=test", "--pod=test-config", @@ -359,7 +355,7 @@ func TestEnsureInstallerPod(t *testing.T) { { name: "normal", expectedArgs: []string{ - "-v=4", + "-v=2", "--revision=1", "--namespace=test", "--pod=test-config", @@ -374,7 +370,7 @@ func TestEnsureInstallerPod(t *testing.T) { { name: "optional", expectedArgs: []string{ - "-v=4", + "-v=2", "--revision=1", "--namespace=test", "--pod=test-config", @@ -399,7 +395,7 @@ func TestEnsureInstallerPod(t *testing.T) { { name: "first-cm-not-optional", expectedArgs: []string{ - "-v=4", + "-v=2", "--revision=1", "--namespace=test", "--pod=test-config", @@ -417,18 +413,14 @@ func TestEnsureInstallerPod(t *testing.T) { t.Run(tt.name, func(t *testing.T) { kubeClient := fake.NewSimpleClientset() - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) return false, nil, nil }) kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ 
ManagementState: operatorv1.Managed, @@ -445,8 +437,9 @@ func TestEnsureInstallerPod(t *testing.T) { }, }, nil, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( "test", "test-pod", @@ -457,12 +450,13 @@ func TestEnsureInstallerPod(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { return []metav1.OwnerReference{}, nil } - err := c.ensureInstallerPod("test-node-1", nil, 1) + err := c.ensureInstallerPod("test-node-1", &operatorv1.StaticPodOperatorSpec{}, 1) if err != nil { if tt.expectedErr == "" { t.Errorf("InstallerController.ensureInstallerPod() expected no error, got = %v", err) @@ -492,23 +486,23 @@ func TestEnsureInstallerPod(t *testing.T) { } func TestCreateInstallerPodMultiNode(t *testing.T) { - newStaticPod := func(name string, revision int, phase v1.PodPhase, ready bool) *v1.Pod { - condStatus := v1.ConditionTrue + newStaticPod := func(name string, revision int, phase corev1.PodPhase, ready bool) *corev1.Pod { + condStatus := corev1.ConditionTrue if !ready { - condStatus = v1.ConditionFalse + condStatus = corev1.ConditionFalse } - return &v1.Pod{ + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "test", Labels: map[string]string{"revision": strconv.Itoa(revision)}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { Status: condStatus, - Type: v1.PodReady, + Type: corev1.PodReady, }, }, Phase: phase, @@ -519,7 +513,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { tests := []struct { name string nodeStatuses []operatorv1.NodeStatus - 
staticPods []*v1.Pod + staticPods []*corev1.Pod latestAvailableRevision int32 expectedUpgradeOrder []int expectedSyncError []bool @@ -527,7 +521,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { numOfInstallersOOM int }{ { - name: "three fresh nodes", + name: "three fresh nodes", latestAvailableRevision: 1, nodeStatuses: []operatorv1.NodeStatus{ { @@ -543,7 +537,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { expectedUpgradeOrder: []int{0, 1, 2}, }, { - name: "three nodes with current revision, all static pods ready", + name: "three nodes with current revision, all static pods ready", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -559,15 +553,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{0, 1, 2}, }, { - name: "one node already transitioning", + name: "one node already transitioning", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -584,15 +578,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: 
[]*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, { - name: "one node already transitioning, although it is newer", + name: "one node already transitioning, although it is newer", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -609,15 +603,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, { - name: "three nodes, 2 not updated, one with failure in last revision", + name: "three nodes, 2 not updated, one with failure in last revision", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -634,15 +628,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, 
corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{}, }, { - name: "three nodes, 2 not updated, one with failure in old revision", + name: "three nodes, 2 not updated, one with failure in old revision", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -659,15 +653,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 2, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{0, 1, 2}, }, { - name: "three nodes with outdated current revision, second static pods unready", + name: "three nodes with outdated current revision, second static pods unready", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -683,15 +677,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + 
newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, { - name: "four nodes with outdated current revision, installer of 2nd was OOM killed, two more OOM happen, then success", + name: "four nodes with outdated current revision, installer of 2nd was OOM killed, two more OOM happen, then success", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -707,10 +701,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, // we call sync 2*3 times: // 1. 
notice update of node 1 @@ -723,7 +717,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { numOfInstallersOOM: 2, }, { - name: "three nodes with outdated current revision, 2nd & 3rd static pods unready", + name: "three nodes with outdated current revision, 2nd & 3rd static pods unready", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -739,15 +733,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, false), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, false), }, expectedUpgradeOrder: []int{1, 2, 0}, }, { - name: "updated node unready and newer version available, but updated again before older nodes are touched", + name: "updated node unready and newer version available, but updated again before older nodes are touched", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -763,15 +757,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, 
corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, { - name: "two nodes on revision 1 and one node on revision 4", + name: "two nodes on revision 1 and one node on revision 4", latestAvailableRevision: 5, nodeStatuses: []operatorv1.NodeStatus{ { @@ -787,15 +781,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 4, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 4, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 2, 0}, }, { - name: "two nodes 2 revisions behind and 1 node on latest available revision", + name: "two nodes 2 revisions behind and 1 node on latest available revision", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -811,15 +805,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodSucceeded, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", 
"test-node-3"), 1, corev1.PodSucceeded, true), }, expectedUpgradeOrder: []int{1, 2}, }, { - name: "two nodes at different revisions behind and 1 node on latest available revision", + name: "two nodes at different revisions behind and 1 node on latest available revision", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -835,15 +829,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodSucceeded, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodSucceeded, true), }, expectedUpgradeOrder: []int{2, 1}, }, { - name: "second node with old static pod than current revision", + name: "second node with old static pod than current revision", latestAvailableRevision: 3, nodeStatuses: []operatorv1.NodeStatus{ { @@ -859,15 +853,15 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 2, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, v1.PodRunning, false), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, false), }, 
expectedUpgradeOrder: []int{1, 2, 0}, }, { - name: "first update status fails", + name: "first update status fails", latestAvailableRevision: 2, nodeStatuses: []operatorv1.NodeStatus{ { @@ -882,9 +876,9 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { for i, test := range tests { t.Run(test.name, func(t *testing.T) { - createdInstallerPods := []*v1.Pod{} - installerPods := map[string]*v1.Pod{} - updatedStaticPods := map[string]*v1.Pod{} + createdInstallerPods := []*corev1.Pod{} + installerPods := map[string]*corev1.Pod{} + updatedStaticPods := map[string]*corev1.Pod{} installerNodeAndID := func(installerName string) (string, int) { ss := strings.SplitN(strings.TrimPrefix(installerName, "installer-"), "-", 2) @@ -897,21 +891,21 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { kubeClient := fake.NewSimpleClientset() kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - createdPod := action.(ktesting.CreateAction).GetObject().(*v1.Pod) + createdPod := action.(ktesting.CreateAction).GetObject().(*corev1.Pod) createdInstallerPods = append(createdInstallerPods, createdPod) if _, found := installerPods[createdPod.Name]; found { - return false, nil, errors.NewAlreadyExists(v1.SchemeGroupVersion.WithResource("pods").GroupResource(), createdPod.Name) + return false, nil, errors.NewAlreadyExists(corev1.SchemeGroupVersion.WithResource("pods").GroupResource(), createdPod.Name) } installerPods[createdPod.Name] = createdPod if test.numOfInstallersOOM > 0 { test.numOfInstallersOOM-- - createdPod.Status.Phase = v1.PodFailed - createdPod.Status.ContainerStatuses = []v1.ContainerStatus{ + createdPod.Status.Phase = corev1.PodFailed + createdPod.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "container", - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ ExitCode: 1, Reason: "OOMKilled", 
Message: "killed by OOM", @@ -924,12 +918,12 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { // Once the installer pod is created, set its status to succeeded. // Note that in reality, this will probably take couple sync cycles to happen, however it is useful to do this fast // to rule out timing bugs. - createdPod.Status.Phase = v1.PodSucceeded + createdPod.Status.Phase = corev1.PodSucceeded nodeName, id := installerNodeAndID(createdPod.Name) staticPodName := mirrorPodNameForNode("test-pod", nodeName) - updatedStaticPods[staticPodName] = newStaticPod(staticPodName, id, v1.PodRunning, true) + updatedStaticPods[staticPodName] = newStaticPod(staticPodName, id, corev1.PodRunning, true) } return true, nil, nil @@ -974,10 +968,6 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { return err } fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -988,9 +978,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { NodeStatuses: test.nodeStatuses, }, statusUpdateErrorFunc, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( fmt.Sprintf("test-%d", i), "test-pod", @@ -1001,6 +992,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -1070,17 +1062,17 @@ func TestInstallerController_manageInstallationPods(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &InstallerController{ - targetNamespace: 
tt.fields.targetNamespace, - staticPodName: tt.fields.staticPodName, - configMaps: tt.fields.configMaps, - secrets: tt.fields.secrets, - command: tt.fields.command, - operatorConfigClient: tt.fields.operatorConfigClient, - configMapsGetter: tt.fields.kubeClient.CoreV1(), - podsGetter: tt.fields.kubeClient.CoreV1(), - eventRecorder: tt.fields.eventRecorder, - queue: tt.fields.queue, - installerPodImageFn: tt.fields.installerPodImageFn, + targetNamespace: tt.fields.targetNamespace, + staticPodName: tt.fields.staticPodName, + configMaps: tt.fields.configMaps, + secrets: tt.fields.secrets, + command: tt.fields.command, + operatorClient: tt.fields.operatorConfigClient, + configMapsGetter: tt.fields.kubeClient.CoreV1(), + podsGetter: tt.fields.kubeClient.CoreV1(), + eventRecorder: tt.fields.eventRecorder, + queue: tt.fields.queue, + installerPodImageFn: tt.fields.installerPodImageFn, } got, err := c.manageInstallationPods(tt.args.operatorSpec, tt.args.originalOperatorStatus, tt.args.resourceVersion) if (err != nil) != tt.wantErr { @@ -1290,7 +1282,7 @@ func TestSetConditions(t *testing.T) { } testCases := []TestCase{ - testCase("AvailableProgressingFailing", true, true, true, 1, 2, 2, 1, 2, 1), + testCase("AvailableProgressingDegraded", true, true, true, 1, 2, 2, 1, 2, 1), testCase("AvailableProgressing", true, true, false, 0, 2, 2, 1, 2, 1), testCase("AvailableNotProgressing", true, false, false, 0, 2, 2, 2, 2), testCase("NotAvailableProgressing", false, true, false, 0, 2, 0, 0), @@ -1322,7 +1314,7 @@ func TestSetConditions(t *testing.T) { t.Errorf("Progressing condition: expected status %v, actual status %v", tc.expectedProgressingStatus, pendingCondition.Status) } - failingCondition := v1helpers.FindOperatorCondition(status.Conditions, nodeInstallerFailing) + failingCondition := v1helpers.FindOperatorCondition(status.Conditions, nodeInstallerDegraded) if failingCondition == nil { t.Error("Failing condition: not found") } else if failingCondition.Status != 
tc.expectedFailingStatus { @@ -1332,3 +1324,76 @@ func TestSetConditions(t *testing.T) { } } + +func TestEnsureCert(t *testing.T) { + tests := []struct { + name string + certConfigMaps []revision.RevisionResource + certSecrets []revision.RevisionResource + + startingResources []runtime.Object + expectedErr string + }{ + { + name: "none", + }, + { + name: "skip-optional", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm", Optional: true}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s", Optional: true}, + }, + }, + { + name: "wait-required", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + expectedErr: "configmaps/foo-cm,secrets/foo-s", + }, + { + name: "found-required", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + startingResources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-cm"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-s"}}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := fake.NewSimpleClientset(test.startingResources...) 
+ c := &InstallerController{ + targetNamespace: "ns", + certConfigMaps: test.certConfigMaps, + certSecrets: test.certSecrets, + + configMapsGetter: client.CoreV1(), + secretsGetter: client.CoreV1(), + } + + actual := c.ensureCerts() + switch { + case len(test.expectedErr) == 0 && actual == nil: + case len(test.expectedErr) == 0 && actual != nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual == nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual != nil && !strings.Contains(actual.Error(), test.expectedErr): + t.Fatal(actual) + } + + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go index debf59455204..77b59f1373eb 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go @@ -27,9 +27,9 @@ import ( ) const ( - operatorStatusMonitoringResourceControllerFailing = "MonitoringResourceControllerFailing" - controllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/monitoring" + operatorStatusMonitoringResourceControllerDegraded = "MonitoringResourceControllerDegraded" + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/monitoring" ) type MonitoringResourceController struct { @@ -37,50 +37,49 @@ type MonitoringResourceController struct { serviceMonitorName string clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister - // preRunCachesSynced are the set of caches that must be synced before the controller will start doing work. This is normally - // the full set of listers and informers you use. 
- preRunCachesSynced []cache.InformerSynced + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + operatorClient v1helpers.StaticPodOperatorClient - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface - - kubeClient kubernetes.Interface - dynamicClient dynamic.Interface - operatorConfigClient v1helpers.StaticPodOperatorClient - eventRecorder events.Recorder + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } // NewMonitoringResourceController creates a new backing resource controller. func NewMonitoringResourceController( targetNamespace string, serviceMonitorName string, - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, kubeInformersForTargetNamespace informers.SharedInformerFactory, kubeClient kubernetes.Interface, dynamicClient dynamic.Interface, eventRecorder events.Recorder, ) *MonitoringResourceController { c := &MonitoringResourceController{ - targetNamespace: targetNamespace, - operatorConfigClient: operatorConfigClient, - eventRecorder: eventRecorder.WithComponentSuffix("monitoring-resource-controller"), - serviceMonitorName: serviceMonitorName, + targetNamespace: targetNamespace, + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("monitoring-resource-controller"), + serviceMonitorName: serviceMonitorName, clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), - preRunCachesSynced: []cache.InformerSynced{ + cachesToSync: []cache.InformerSynced{ kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced, - operatorConfigClient.Informer().HasSynced, + operatorClient.Informer().HasSynced, }, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "MonitoringResourceController"), kubeClient: kubeClient, dynamicClient: 
dynamicClient, } - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + operatorClient.Informer().AddEventHandler(c.eventHandler()) // TODO: We need a dynamic informer here to observe changes to ServiceMonitor resource. - kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced) + return c } @@ -94,7 +93,7 @@ func (c MonitoringResourceController) mustTemplateAsset(name string) ([]byte, er } func (c MonitoringResourceController) sync() error { - operatorSpec, _, _, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } @@ -127,7 +126,7 @@ func (c MonitoringResourceController) sync() error { // NOTE: Failing to create the monitoring resources should not lead to operator failed state. 
cond := operatorv1.OperatorCondition{ - Type: operatorStatusMonitoringResourceControllerFailing, + Type: operatorStatusMonitoringResourceControllerDegraded, Status: operatorv1.ConditionFalse, } if err != nil { @@ -137,7 +136,7 @@ func (c MonitoringResourceController) sync() error { cond.Reason = "Error" cond.Message = err.Error() } - if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { if err == nil { return updateError } @@ -152,7 +151,7 @@ func (c *MonitoringResourceController) Run(workers int, stopCh <-chan struct{}) klog.Infof("Starting MonitoringResourceController") defer klog.Infof("Shutting down MonitoringResourceController") - if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go index cbef43f34b6f..7b62524ef132 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go @@ -58,10 +58,6 @@ func TestNewMonitoringResourcesController(t *testing.T) { { name: "create when not exists", staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -69,6 +65,7 @@ func 
TestNewMonitoringResourcesController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), validateActions: func(t *testing.T, actions []clienttesting.Action) { if len(actions) != 4 { @@ -94,10 +91,6 @@ func TestNewMonitoringResourcesController(t *testing.T) { { name: "skip when exists", staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -105,6 +98,7 @@ func TestNewMonitoringResourcesController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), startingDynamicObjects: []runtime.Object{mustAssetServiceMonitor("target-namespace")}, validateActions: func(t *testing.T, actions []clienttesting.Action) {}, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go index c3ef0ffa5219..85fb82965de1 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go @@ -26,39 +26,40 @@ const nodeControllerWorkQueueKey = "key" // NodeController watches for new master nodes and adds them to the node status list in the operator config status. 
type NodeController struct { - operatorConfigClient v1helpers.StaticPodOperatorClient - eventRecorder events.Recorder + operatorClient v1helpers.StaticPodOperatorClient - nodeListerSynced cache.InformerSynced - nodeLister corelisterv1.NodeLister + nodeLister corelisterv1.NodeLister - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } // NewNodeController creates a new node controller. func NewNodeController( - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, kubeInformersClusterScoped informers.SharedInformerFactory, eventRecorder events.Recorder, ) *NodeController { c := &NodeController{ - operatorConfigClient: operatorConfigClient, - eventRecorder: eventRecorder.WithComponentSuffix("node-controller"), - nodeListerSynced: kubeInformersClusterScoped.Core().V1().Nodes().Informer().HasSynced, - nodeLister: kubeInformersClusterScoped.Core().V1().Nodes().Lister(), + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("node-controller"), + nodeLister: kubeInformersClusterScoped.Core().V1().Nodes().Lister(), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NodeController"), } - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + operatorClient.Informer().AddEventHandler(c.eventHandler()) kubeInformersClusterScoped.Core().V1().Nodes().Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersClusterScoped.Core().V1().Nodes().Informer().HasSynced) + return c } func (c NodeController) sync() error { - _, originalOperatorStatus, resourceVersion, err := c.operatorConfigClient.GetStaticPodOperatorState() + _, originalOperatorStatus, 
resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } @@ -107,7 +108,7 @@ func (c NodeController) sync() error { operatorStatus.NodeStatuses = newTargetNodeStates if !equality.Semantic.DeepEqual(originalOperatorStatus, operatorStatus) { - if _, updateError := c.operatorConfigClient.UpdateStaticPodOperatorStatus(resourceVersion, operatorStatus); updateError != nil { + if _, updateError := c.operatorClient.UpdateStaticPodOperatorStatus(resourceVersion, operatorStatus); updateError != nil { return updateError } } @@ -122,7 +123,7 @@ func (c *NodeController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting NodeController") defer klog.Infof("Shutting down NodeController") - if !cache.WaitForCacheSync(stopCh, c.nodeListerSynced) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go index 4309e7951da7..37ec665f0224 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go @@ -105,10 +105,6 @@ func TestNewNodeController(t *testing.T) { fakeLister := v1helpers.NewFakeNodeLister(kubeClient) kubeInformers := informers.NewSharedInformerFactory(kubeClient, 1*time.Minute) fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -119,6 +115,7 @@ func TestNewNodeController(t *testing.T) { NodeStatuses: test.startNodeStatus, }, nil, + nil, ) eventRecorder := 
events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go index 727d78b11128..fb4a9c3365db 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go @@ -34,20 +34,21 @@ type PruneController struct { targetNamespace, podResourcePrefix string // command is the string to use for the pruning pod command command []string - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface // prunerPodImageFn returns the image name for the pruning pod prunerPodImageFn func() string // ownerRefsFn sets the ownerrefs on the pruner pod ownerRefsFn func(revision int32) ([]metav1.OwnerReference, error) - operatorConfigClient v1helpers.StaticPodOperatorClient + operatorClient v1helpers.StaticPodOperatorClient configMapGetter corev1client.ConfigMapsGetter secretGetter corev1client.SecretsGetter podGetter corev1client.PodsGetter - eventRecorder events.Recorder + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } const ( @@ -64,7 +65,7 @@ func NewPruneController( configMapGetter corev1client.ConfigMapsGetter, secretGetter corev1client.SecretsGetter, podGetter corev1client.PodsGetter, - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, eventRecorder events.Recorder, ) *PruneController { c := &PruneController{ @@ -72,7 +73,7 @@ func NewPruneController( podResourcePrefix: podResourcePrefix, command: command, - operatorConfigClient: operatorConfigClient, + operatorClient: operatorClient, configMapGetter: 
configMapGetter, secretGetter: secretGetter, @@ -84,7 +85,10 @@ func NewPruneController( } c.ownerRefsFn = c.setOwnerRefs - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) return c } @@ -280,6 +284,9 @@ func (c *PruneController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting PruneController") defer klog.Infof("Shutting down PruneController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) @@ -313,7 +320,7 @@ func (c *PruneController) processNextWorkItem() bool { func (c *PruneController) sync() error { klog.V(5).Info("Syncing revision pruner") - operatorSpec, operatorStatus, _, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, operatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go index d8151d43ec31..58fec73ba54b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go @@ -178,10 +178,6 @@ func TestPruneAPIResources(t *testing.T) { for _, tc := range tests { kubeClient := fake.NewSimpleClientset(tc.startingObjects...) 
fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ FailedRevisionLimit: tc.failedLimit, SucceededRevisionLimit: tc.succeededLimit, @@ -200,6 +196,7 @@ func TestPruneAPIResources(t *testing.T) { }, }, nil, + nil, ) eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) @@ -215,21 +212,21 @@ func TestPruneAPIResources(t *testing.T) { } c := &PruneController{ - targetNamespace: tc.targetNamespace, - podResourcePrefix: "test-pod", - command: []string{"/bin/true"}, - configMapGetter: kubeClient.CoreV1(), - secretGetter: kubeClient.CoreV1(), - podGetter: kubeClient.CoreV1(), - eventRecorder: eventRecorder, - operatorConfigClient: fakeStaticPodOperatorClient, + targetNamespace: tc.targetNamespace, + podResourcePrefix: "test-pod", + command: []string{"/bin/true"}, + configMapGetter: kubeClient.CoreV1(), + secretGetter: kubeClient.CoreV1(), + podGetter: kubeClient.CoreV1(), + eventRecorder: eventRecorder, + operatorClient: fakeStaticPodOperatorClient, } c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { return []metav1.OwnerReference{}, nil } c.prunerPodImageFn = func() string { return "docker.io/foo/bar" } - operatorSpec, _, _, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { t.Fatalf("unexpected error %q", err) } @@ -394,10 +391,6 @@ func TestPruneDiskResources(t *testing.T) { }) fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ FailedRevisionLimit: test.failedLimit, SucceededRevisionLimit: test.succeededLimit, @@ -416,6 +409,7 @@ func TestPruneDiskResources(t *testing.T) { }, }, 
nil, + nil, ) eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) @@ -431,21 +425,21 @@ func TestPruneDiskResources(t *testing.T) { } c := &PruneController{ - targetNamespace: "test", - podResourcePrefix: "test-pod", - command: []string{"/bin/true"}, - configMapGetter: kubeClient.CoreV1(), - secretGetter: kubeClient.CoreV1(), - podGetter: kubeClient.CoreV1(), - eventRecorder: eventRecorder, - operatorConfigClient: fakeStaticPodOperatorClient, + targetNamespace: "test", + podResourcePrefix: "test-pod", + command: []string{"/bin/true"}, + configMapGetter: kubeClient.CoreV1(), + secretGetter: kubeClient.CoreV1(), + podGetter: kubeClient.CoreV1(), + eventRecorder: eventRecorder, + operatorClient: fakeStaticPodOperatorClient, } c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { return []metav1.OwnerReference{}, nil } c.prunerPodImageFn = func() string { return "docker.io/foo/bar" } - operatorSpec, _, _, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { t.Fatalf("unexpected error %q", err) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go index f450aba22c62..31b9eb20e0fd 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go @@ -25,7 +25,7 @@ import ( "github.com/openshift/library-go/pkg/operator/v1helpers" ) -const operatorStatusRevisionControllerFailing = "RevisionControllerFailing" +const operatorStatusRevisionControllerDegraded = "RevisionControllerDegraded" const revisionControllerWorkQueueKey = "key" // RevisionController is a controller that 
watches a set of configmaps and secrets and them against a revision snapshot @@ -39,13 +39,12 @@ type RevisionController struct { // secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these. secrets []RevisionResource - operatorConfigClient v1helpers.StaticPodOperatorClient - configMapGetter corev1client.ConfigMapsGetter - secretGetter corev1client.SecretsGetter - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + operatorClient v1helpers.StaticPodOperatorClient + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface eventRecorder events.Recorder } @@ -60,7 +59,7 @@ func NewRevisionController( configMaps []RevisionResource, secrets []RevisionResource, kubeInformersForTargetNamespace informers.SharedInformerFactory, - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, configMapGetter corev1client.ConfigMapsGetter, secretGetter corev1client.SecretsGetter, eventRecorder events.Recorder, @@ -70,18 +69,22 @@ func NewRevisionController( configMaps: configMaps, secrets: secrets, - operatorConfigClient: operatorConfigClient, - configMapGetter: configMapGetter, - secretGetter: secretGetter, - eventRecorder: eventRecorder.WithComponentSuffix("revision-controller"), + operatorClient: operatorClient, + configMapGetter: configMapGetter, + secretGetter: secretGetter, + eventRecorder: eventRecorder.WithComponentSuffix("revision-controller"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RevisionController"), } - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + operatorClient.Informer().AddEventHandler(c.eventHandler()) 
kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().HasSynced) + return c } @@ -102,12 +105,12 @@ func (c RevisionController) createRevisionIfNeeded(operatorSpec *operatorv1.Stat c.eventRecorder.Eventf("RevisionTriggered", "new revision %d triggered by %q", nextRevision, reason) if err := c.createNewRevision(nextRevision); err != nil { cond := operatorv1.OperatorCondition{ - Type: "RevisionControllerFailing", + Type: "RevisionControllerDegraded", Status: operatorv1.ConditionTrue, Reason: "ContentCreationError", Message: err.Error(), } - if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { c.eventRecorder.Warningf("RevisionCreateFailed", "Failed to create revision %d: %v", nextRevision, err.Error()) return true, updateError } @@ -115,10 +118,10 @@ func (c RevisionController) createRevisionIfNeeded(operatorSpec *operatorv1.Stat } cond := operatorv1.OperatorCondition{ - Type: "RevisionControllerFailing", + Type: "RevisionControllerDegraded", Status: operatorv1.ConditionFalse, } - if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond), func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, 
v1helpers.UpdateStaticPodConditionFn(cond), func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { if operatorStatus.LatestAvailableRevision == nextRevision { klog.Warningf("revision %d is unexpectedly already the latest available revision. This is a possible race!", nextRevision) return fmt.Errorf("conflicting latestAvailableRevision %d", operatorStatus.LatestAvailableRevision) @@ -246,7 +249,7 @@ func (c RevisionController) createNewRevision(revision int32) error { } func (c RevisionController) sync() error { - operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorConfigClient.GetStaticPodOperatorStateWithQuorum() + operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorStateWithQuorum() if err != nil { return err } @@ -264,7 +267,7 @@ func (c RevisionController) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusRevisionControllerFailing, + Type: operatorStatusRevisionControllerDegraded, Status: operatorv1.ConditionFalse, } if err != nil { @@ -272,7 +275,7 @@ func (c RevisionController) sync() error { cond.Reason = "Error" cond.Message = err.Error() } - if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { if err == nil { return updateError } @@ -288,6 +291,9 @@ func (c *RevisionController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting RevisionController") defer klog.Infof("Shutting down RevisionController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } // doesn't matter what workers say, only start one. 
go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go index 3b65872073c8..858ae06407da 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go @@ -52,10 +52,6 @@ func TestRevisionController(t *testing.T) { testName: "operator-unmanaged", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Unmanaged, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Unmanaged, @@ -63,6 +59,7 @@ func TestRevisionController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), validateActions: func(t *testing.T, actions []clienttesting.Action) { createdObjects := filterCreateActions(actions) @@ -75,10 +72,6 @@ func TestRevisionController(t *testing.T) { testName: "missing-source-resources", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -95,12 +88,13 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), testConfigs: []RevisionResource{{Name: "test-config"}}, testSecrets: []RevisionResource{{Name: "test-secret"}}, expectSyncError: "synthetic requeue request", validateStatus: func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) { - if status.Conditions[0].Type != 
"RevisionControllerFailing" { + if status.Conditions[0].Type != "RevisionControllerDegraded" { t.Errorf("expected status condition to be 'RevisionControllerFailing', got %v", status.Conditions[0].Type) } if status.Conditions[0].Reason != "ContentCreationError" { @@ -115,10 +109,6 @@ func TestRevisionController(t *testing.T) { testName: "copy-resources", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -135,6 +125,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -185,10 +176,6 @@ func TestRevisionController(t *testing.T) { testName: "copy-resources-opt", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -205,6 +192,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -267,10 +255,6 @@ func TestRevisionController(t *testing.T) { testName: "copy-resources-opt-missing", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -287,6 +271,7 @@ func TestRevisionController(t *testing.T) { }, }, 
nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -331,10 +316,6 @@ func TestRevisionController(t *testing.T) { testName: "latest-revision-current", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -351,6 +332,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -372,10 +354,6 @@ func TestRevisionController(t *testing.T) { testName: "latest-revision-current-optionals-missing", targetNamespace: targetNamespace, staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( - &operatorv1.OperatorSpec{ - ManagementState: operatorv1.Managed, - }, - &operatorv1.OperatorStatus{}, &operatorv1.StaticPodOperatorSpec{ OperatorSpec: operatorv1.OperatorSpec{ ManagementState: operatorv1.Managed, @@ -392,6 +370,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go index 9cefee1bce82..34e46b98b2e2 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go @@ -24,7 +24,7 @@ import ( ) var ( - 
staticPodStateControllerFailing = "StaticPodsFailing" + staticPodStateControllerDegraded = "StaticPodsDegraded" staticPodStateControllerWorkQueueKey = "key" ) @@ -36,14 +36,14 @@ type StaticPodStateController struct { operandName string operatorNamespace string - operatorConfigClient v1helpers.StaticPodOperatorClient - configMapGetter corev1client.ConfigMapsGetter - podsGetter corev1client.PodsGetter - versionRecorder status.VersionGetter - eventRecorder events.Recorder + operatorClient v1helpers.StaticPodOperatorClient + configMapGetter corev1client.ConfigMapsGetter + podsGetter corev1client.PodsGetter + versionRecorder status.VersionGetter - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } // NewStaticPodStateController creates a controller that watches static pods and will produce a failing status if the @@ -51,7 +51,7 @@ type StaticPodStateController struct { func NewStaticPodStateController( targetNamespace, staticPodName, operatorNamespace, operandName string, kubeInformersForTargetNamespace informers.SharedInformerFactory, - operatorConfigClient v1helpers.StaticPodOperatorClient, + operatorClient v1helpers.StaticPodOperatorClient, configMapGetter corev1client.ConfigMapsGetter, podsGetter corev1client.PodsGetter, versionRecorder status.VersionGetter, @@ -63,23 +63,26 @@ func NewStaticPodStateController( operandName: operandName, operatorNamespace: operatorNamespace, - operatorConfigClient: operatorConfigClient, - configMapGetter: configMapGetter, - podsGetter: podsGetter, - versionRecorder: versionRecorder, - eventRecorder: eventRecorder.WithComponentSuffix("static-pod-state-controller"), + operatorClient: operatorClient, + configMapGetter: configMapGetter, + podsGetter: podsGetter, + versionRecorder: versionRecorder, + eventRecorder: 
eventRecorder.WithComponentSuffix("static-pod-state-controller"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StaticPodStateController"), } - operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + operatorClient.Informer().AddEventHandler(c.eventHandler()) kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + return c } func (c *StaticPodStateController) sync() error { - operatorSpec, originalOperatorStatus, _, err := c.operatorConfigClient.GetStaticPodOperatorState() + operatorSpec, originalOperatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } @@ -130,13 +133,13 @@ func (c *StaticPodStateController) sync() error { } else { c.versionRecorder.SetVersion( c.operandName, - status.VersionForOperand(c.operatorNamespace, images.List()[0], c.configMapGetter, c.eventRecorder), + status.VersionForOperandFromEnv(), ) } // update failing condition cond := operatorv1.OperatorCondition{ - Type: staticPodStateControllerFailing, + Type: staticPodStateControllerDegraded, Status: operatorv1.ConditionFalse, } // Failing errors @@ -150,7 +153,7 @@ func (c *StaticPodStateController) sync() error { cond.Reason = "Error" cond.Message = v1helpers.NewMultiLineAggregate(errs).Error() } - if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorConfigClient, v1helpers.UpdateStaticPodConditionFn(cond), v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { if err == nil { return updateError } @@ -170,6 +173,9 @@ func (c *StaticPodStateController) 
Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting StaticPodStateController") defer klog.Infof("Shutting down StaticPodStateController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go index b722befc87ef..f17b19871b60 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -3,6 +3,8 @@ package staticpod import ( "fmt" + "github.com/openshift/library-go/pkg/operator/loglevel" + "github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller" "k8s.io/apimachinery/pkg/util/errors" @@ -162,6 +164,7 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator operandInformers, b.staticPodOperatorClient, configMapClient, + secretClient, podClient, eventRecorder, ).WithCerts( @@ -227,6 +230,7 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator } controllers.unsupportedConfigOverridesController = unsupportedconfigoverridescontroller.NewUnsupportedConfigOverridesController(b.staticPodOperatorClient, eventRecorder) + controllers.logLevelController = loglevel.NewClusterOperatorLoggingController(b.staticPodOperatorClient, eventRecorder) errs := []error{} if controllers.revisionController == nil { @@ -257,6 +261,7 @@ type staticPodOperatorControllers struct { backingResourceController *backingresource.BackingResourceController monitoringResourceController *monitoring.MonitoringResourceController unsupportedConfigOverridesController *unsupportedconfigoverridescontroller.UnsupportedConfigOverridesController + logLevelController *loglevel.LogLevelController } func (o 
*staticPodOperatorControllers) WithInstallerPodMutationFn(installerPodMutationFn installer.InstallerPodMutationFunc) *staticPodOperatorControllers { @@ -273,6 +278,7 @@ func (o *staticPodOperatorControllers) Run(stopCh <-chan struct{}) { go o.backingResourceController.Run(1, stopCh) go o.monitoringResourceController.Run(1, stopCh) go o.unsupportedConfigOverridesController.Run(1, stopCh) + go o.logLevelController.Run(1, stopCh) <-stopCh } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go index 8e2af3a886bf..2738ba2b9544 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go @@ -336,15 +336,16 @@ func (o *InstallOptions) copyContent(ctx context.Context) error { func (o *InstallOptions) Run(ctx context.Context) error { var eventTarget *corev1.ObjectReference - if err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { + err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { var clientErr error eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(o.KubeClient, o.Namespace, nil) if clientErr != nil { return false, clientErr } return true, nil - }); err != nil { - return fmt.Errorf("failed to get self-reference: %v", err) + }) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) } recorder := events.NewRecorder(o.KubeClient.CoreV1().Events(o.Namespace), "static-pod-installer", eventTarget) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go index bac7c0009396..60979a379f74 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go @@ -3,17 +3,34 @@ package status import ( "fmt" "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// conditionMergeState indicates whether you want to merge all Falses or merge all Trues. For instance, Failures merge +// unionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +func unionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, false, allConditions...) +} + +// unionInertialCondition returns a single cluster operator condition that is the union of multiple operator conditions, +// but resists returning a condition with a status opposite the defaultConditionStatus. +func unionInertialCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, true, allConditions...) +} + +// internalUnionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +// +// defaultConditionStatus indicates whether you want to merge all Falses or merge all Trues. For instance, Failures merge // on true, but Available merges on false. Thing of it like an anti-default. -func unionCondition(conditionType string, defaultConditionState operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { +// +// If hasInertia, then resist returning a condition with a status opposite the defaultConditionStatus. 
+func internalUnionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, hasInertia bool, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { var oppositeConditionStatus operatorv1.ConditionStatus - if defaultConditionState == operatorv1.ConditionTrue { + if defaultConditionStatus == operatorv1.ConditionTrue { oppositeConditionStatus = operatorv1.ConditionFalse } else { oppositeConditionStatus = operatorv1.ConditionTrue @@ -38,8 +55,10 @@ func unionCondition(conditionType string, defaultConditionState operatorv1.Condi return OperatorConditionToClusterOperatorCondition(unionedCondition) } - if len(badConditions) == 0 { - unionedCondition.Status = defaultConditionState + oneMinuteAgo := time.Now().Add(-1 * time.Minute) + earliestBadConditionNotOldEnough := earliestTransitionTime(badConditions).Time.After(oneMinuteAgo) + if len(badConditions) == 0 || (hasInertia && earliestBadConditionNotOldEnough) { + unionedCondition.Status = defaultConditionStatus unionedCondition.Message = unionMessage(interestingConditions) unionedCondition.Reason = "AsExpected" unionedCondition.LastTransitionTime = latestTransitionTime(interestingConditions) @@ -66,6 +85,16 @@ func latestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time return latestTransitionTime } +func earliestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time { + earliestTransitionTime := metav1.Now() + for _, condition := range conditions { + if !earliestTransitionTime.Before(&condition.LastTransitionTime) { + earliestTransitionTime = condition.LastTransitionTime + } + } + return earliestTransitionTime +} + func uniq(s []string) []string { seen := make(map[string]struct{}, len(s)) j := 0 diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go index 9ed673c614c7..06f30464440d 100644 --- 
a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -19,6 +19,7 @@ import ( configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1" configv1listers "github.com/openshift/client-go/config/listers/config/v1" + configv1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" @@ -45,10 +46,10 @@ type StatusSyncer struct { operatorClient operatorv1helpers.OperatorClient clusterOperatorClient configv1client.ClusterOperatorsGetter clusterOperatorLister configv1listers.ClusterOperatorLister - eventRecorder events.Recorder - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } func NewClusterOperatorStatusController( @@ -75,6 +76,9 @@ func NewClusterOperatorStatusController( operatorClient.Informer().AddEventHandler(c.eventHandler()) clusterOperatorInformer.Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, clusterOperatorInformer.Informer().HasSynced) + return c } @@ -124,7 +128,7 @@ func (c StatusSyncer) sync() error { configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: 
configv1.ConditionUnknown, Reason: "Unmanaged"}) - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorFailing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { @@ -138,7 +142,7 @@ func (c StatusSyncer) sync() error { } clusterOperatorObj.Status.RelatedObjects = c.relatedObjects - configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Failing", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionInertialCondition("Degraded", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Progressing", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Available", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Upgradeable", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) @@ -178,6 +182,9 @@ func (c *StatusSyncer) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting StatusSyncer-" + c.clusterOperatorName) defer klog.Infof("Shutting down StatusSyncer-" + c.clusterOperatorName) + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) 
{ + return + } // start watching for version changes go c.watchVersionGetter(stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go index 30e9a0ff9070..05b94854d3a4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go @@ -4,6 +4,7 @@ import ( "reflect" "strings" "testing" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" @@ -13,91 +14,214 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/client-go/config/clientset/versioned/fake" configv1listers "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" "github.com/openshift/library-go/pkg/operator/events" ) -func TestFailing(t *testing.T) { +func TestDegraded(t *testing.T) { + + twoMinutesAgo := metav1.NewTime(time.Now().Add(-2 * time.Minute)) + fiveSecondsAgo := metav1.NewTime(time.Now().Add(-2 * time.Second)) + yesterday := metav1.NewTime(time.Now().Add(-24 * time.Hour)) testCases := []struct { - name string - conditions []operatorv1.OperatorCondition - expectedFailingStatus configv1.ConditionStatus - expectedMessages []string - expectedReason string + name string + conditions []operatorv1.OperatorCondition + expectedType configv1.ClusterStatusConditionType + expectedStatus configv1.ConditionStatus + expectedMessages []string + expectedReason string }{ { - name: "no data", - conditions: []operatorv1.OperatorCondition{}, - expectedFailingStatus: configv1.ConditionUnknown, - expectedReason: "NoData", + name: "no data", + conditions: []operatorv1.OperatorCondition{}, + expectedStatus: configv1.ConditionUnknown, + expectedReason: "NoData", }, { - name: "one failing false", + name: "one not 
failing/within threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeAFailing", Status: operatorv1.ConditionFalse}, + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from type a", }, - expectedFailingStatus: configv1.ConditionFalse, - expectedReason: "AsExpected", }, { - name: "one failing true", + name: "one not failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeAFailing", Status: operatorv1.ConditionTrue}, + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type a"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from type a", }, - expectedFailingStatus: configv1.ConditionTrue, - expectedReason: "TypeAFailing", }, { - name: "two present, one failing", + name: "one failing/within threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeAFailing", Status: operatorv1.ConditionTrue, Message: "a message from type a"}, - {Type: "TypeBFailing", Status: operatorv1.ConditionFalse}, + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, }, - expectedFailingStatus: configv1.ConditionTrue, - expectedReason: "TypeAFailing", + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", expectedMessages: []string{ - "TypeAFailing: a message from type a", + "TypeADegraded: a message from type a", }, }, { - name: "two present, second one failing", + name: "one failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeAFailing", Status: operatorv1.ConditionFalse}, - {Type: "TypeBFailing", Status: operatorv1.ConditionTrue, Message: 
"a message from type b"}, + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, Message: "a message from type a", LastTransitionTime: twoMinutesAgo}, }, - expectedFailingStatus: configv1.ConditionTrue, - expectedReason: "TypeBFailing", + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeADegraded", expectedMessages: []string{ - "TypeBFailing: a message from type b", + "TypeADegraded: a message from type a", }, }, { - name: "many present, some failing", + name: "two present/one failing/within threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeAFailing", Status: operatorv1.ConditionFalse}, - {Type: "TypeBFailing", Status: operatorv1.ConditionTrue, Message: "a message from type b\nanother message from type b"}, - {Type: "TypeCFailing", Status: operatorv1.ConditionFalse, Message: "a message from type c"}, - {Type: "TypeDFailing", Status: operatorv1.ConditionTrue, Message: "a message from type d"}, + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, }, - expectedFailingStatus: configv1.ConditionTrue, - expectedReason: "MultipleConditionsMatching", + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", expectedMessages: []string{ - "TypeBFailing: a message from type b", - "TypeBFailing: another message from type b", - "TypeDFailing: a message from type d", + "TypeADegraded: a message from type a", + }, + }, + { + name: "two present/one failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type a"}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeADegraded", + expectedMessages: []string{ + 
"TypeADegraded: a message from type a", + }, + }, + { + name: "two present/second one failing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + }, + }, + { + name: "two present/second one failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type b"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeBDegraded", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + }, + }, + { + name: "many present/some failing/all within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: another message from type b", + "TypeCDegraded: a message from type c", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "many present/some failing 
some/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "MultipleConditionsMatching", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: another message from type b", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "many present/some failing/all beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "MultipleConditionsMatching", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: another message from type b", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "one progressing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAProgressing", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedType: 
configv1.OperatorProgressing, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeAProgressing", + expectedMessages: []string{ + "TypeAProgressing: a message from type a", + }, + }, + { + name: "one not available/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAAvailable", Status: operatorv1.ConditionFalse, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedType: configv1.OperatorAvailable, + expectedStatus: configv1.ConditionFalse, + expectedReason: "TypeAAvailable", + expectedMessages: []string{ + "TypeAAvailable: a message from type a", }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - clusteroperator := &configv1.ClusterOperator{ + for _, condition := range tc.conditions { + if condition.LastTransitionTime == (metav1.Time{}) { + t.Fatal("LastTransitionTime not set.") + } + } + if tc.expectedType == "" { + tc.expectedType = configv1.OperatorDegraded + } + clusterOperator := &configv1.ClusterOperator{ ObjectMeta: metav1.ObjectMeta{Name: "OPERATOR_NAME", ResourceVersion: "12"}, } - clusterOperatorClient := fake.NewSimpleClientset(clusteroperator) + clusterOperatorClient := fake.NewSimpleClientset(clusterOperator) indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - indexer.Add(clusteroperator) + indexer.Add(clusterOperator) statusClient := &statusClient{ t: t, @@ -120,10 +244,10 @@ func TestFailing(t *testing.T) { result, _ := clusterOperatorClient.ConfigV1().ClusterOperators().Get("OPERATOR_NAME", metav1.GetOptions{}) var expectedCondition *configv1.ClusterOperatorStatusCondition - if tc.expectedFailingStatus != "" { + if tc.expectedStatus != "" { expectedCondition = &configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorFailing, - Status: configv1.ConditionStatus(string(tc.expectedFailingStatus)), + Type: tc.expectedType, + Status: 
configv1.ConditionStatus(string(tc.expectedStatus)), } if len(tc.expectedMessages) > 0 { expectedCondition.Message = strings.Join(tc.expectedMessages, "\n") @@ -137,7 +261,7 @@ func TestFailing(t *testing.T) { result.Status.Conditions[i].LastTransitionTime = metav1.Time{} } - actual := v1helpers.FindStatusCondition(result.Status.Conditions, "Failing") + actual := v1helpers.FindStatusCondition(result.Status.Conditions, tc.expectedType) if !reflect.DeepEqual(expectedCondition, actual) { t.Error(diff.ObjectDiff(expectedCondition, actual)) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go index 92c61eae813b..5543a602d27d 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go @@ -1,6 +1,7 @@ package status import ( + "os" "sync" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -16,6 +17,10 @@ type versionGetter struct { notificationChannels []chan struct{} } +const ( + operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION" +) + func NewVersionGetter() VersionGetter { return &versionGetter{ versions: map[string]string{}, @@ -57,6 +62,10 @@ func (v *versionGetter) VersionChangedChannel() <-chan struct{} { return channel } +func VersionForOperandFromEnv() string { + return os.Getenv(operandImageVersionEnvVarName) +} + func VersionForOperand(namespace, imagePullSpec string, configMapGetter corev1client.ConfigMapsGetter, eventRecorder events.Recorder) string { versionMap := map[string]string{} versionMapping, err := configMapGetter.ConfigMaps(namespace).Get("version-mapping", metav1.GetOptions{}) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go 
b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go index f118b7a0a3f6..2f659617b203 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go @@ -30,13 +30,11 @@ const ( // UnsupportedConfigOverridesController is a controller that will copy source configmaps and secrets to their destinations. // It will also mirror deletions by deleting destinations. type UnsupportedConfigOverridesController struct { - preRunCachesSynced []cache.InformerSynced - - // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface - operatorClient v1helpers.OperatorClient - eventRecorder events.Recorder + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder } // NewUnsupportedConfigOverridesController creates UnsupportedConfigOverridesController. 
@@ -46,16 +44,15 @@ func NewUnsupportedConfigOverridesController( ) *UnsupportedConfigOverridesController { c := &UnsupportedConfigOverridesController{ operatorClient: operatorClient, - eventRecorder: eventRecorder.WithComponentSuffix("unsupported-config-overrides-controller"), - preRunCachesSynced: []cache.InformerSynced{ - operatorClient.Informer().HasSynced, - }, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UnsupportedConfigOverridesController"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UnsupportedConfigOverridesController"), + eventRecorder: eventRecorder.WithComponentSuffix("unsupported-config-overrides-controller"), } operatorClient.Informer().AddEventHandler(c.eventHandler()) + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + return c } @@ -153,7 +150,7 @@ func (c *UnsupportedConfigOverridesController) Run(workers int, stopCh <-chan st klog.Infof("Starting UnsupportedConfigOverridesController") defer klog.Infof("Shutting down UnsupportedConfigOverridesController") - if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) { + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { return } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go index 2bb7cc63591d..4afb23a61215 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -9,9 +9,9 @@ type OperatorClient interface { Informer() cache.SharedIndexInformer // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister. 
GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) - // UpdateOperatorSpec updates the spec of the operator, assuming the given resource verison. + // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version. UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) - // UpdateOperatorStatus updates the status of the operator, assuming the given resource verison. + // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) } @@ -25,4 +25,6 @@ type StaticPodOperatorClient interface { GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version. UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) + // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. 
+ UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go index 258b33c3c7b5..014585c55190 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -59,26 +59,26 @@ func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { } // NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. -func NewFakeStaticPodOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, +func NewFakeStaticPodOperatorClient( staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, - triggerErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error) StaticPodOperatorClient { + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { return &fakeStaticPodOperatorClient{ - fakeOperatorSpec: spec, - fakeOperatorStatus: status, fakeStaticPodOperatorSpec: staticPodSpec, fakeStaticPodOperatorStatus: staticPodStatus, resourceVersion: "0", - triggerStatusUpdateError: triggerErr, + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, } } type fakeStaticPodOperatorClient struct { - fakeOperatorSpec *operatorv1.OperatorSpec fakeOperatorStatus *operatorv1.OperatorStatus fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + 
triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error } func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { @@ -111,8 +111,26 @@ func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVers return c.fakeStaticPodOperatorStatus, nil } +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { - panic("not supported") + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, "", nil } func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { panic("not supported") diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml new file mode 100644 index 000000000000..3deb4a124302 --- /dev/null +++ b/vendor/github.com/pborman/uuid/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - "1.9" + - "1.10" + - "1.11" + - tip + +script: + - go test -v ./... 
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md new file mode 100644 index 000000000000..04fdf09f136b --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md new file mode 100644 index 000000000000..810ad40dc930 --- /dev/null +++ b/vendor/github.com/pborman/uuid/README.md @@ -0,0 +1,15 @@ +This project was automatically exported from code.google.com/p/go-uuid + +# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. + +This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). 
+ +###### Install +`go get github.com/pborman/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) + +Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: +http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go old mode 100755 new mode 100644 diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go old mode 100755 new mode 100644 index d8bd013e6890..727d76167459 --- a/vendor/github.com/pborman/uuid/doc.go +++ b/vendor/github.com/pborman/uuid/doc.go @@ -4,5 +4,10 @@ // The uuid package generates and inspects UUIDs. // -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// This package is a partial wrapper around the github.com/google/uuid package. +// This package represents a UUID as []byte while github.com/google/uuid +// represents a UUID as [16]byte. 
package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod new file mode 100644 index 000000000000..099fc7de0d55 --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.mod @@ -0,0 +1,3 @@ +module github.com/pborman/uuid + +require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum new file mode 100644 index 000000000000..db2574a9c3f2 --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go index cdd4192fd9b9..a0420c1ef3a9 100644 --- a/vendor/github.com/pborman/uuid/hash.go +++ b/vendor/github.com/pborman/uuid/hash.go @@ -19,7 +19,7 @@ var ( NIL = Parse("00000000-0000-0000-0000-000000000000") ) -// NewHash returns a new UUID dervied from the hash of space concatenated with +// NewHash returns a new UUID derived from the hash of space concatenated with // data generated by h. The hash should be at least 16 byte in length. The // first 16 bytes of the hash are used to form the UUID. The version of the // UUID will be the lower 4 bits of version. NewHash is used to implement diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go deleted file mode 100644 index 760580a504f4..000000000000 --- a/vendor/github.com/pborman/uuid/json.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import "errors" - -func (u UUID) MarshalJSON() ([]byte, error) { - if len(u) == 0 { - return []byte(`""`), nil - } - return []byte(`"` + u.String() + `"`), nil -} - -func (u *UUID) UnmarshalJSON(data []byte) error { - if len(data) == 0 || string(data) == `""` { - return nil - } - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return errors.New("invalid UUID format") - } - data = data[1 : len(data)-1] - uu := Parse(string(data)) - if uu == nil { - return errors.New("invalid UUID format") - } - *u = uu - return nil -} diff --git a/vendor/github.com/pborman/uuid/json_test.go b/vendor/github.com/pborman/uuid/json_test.go deleted file mode 100644 index b5eae092472c..000000000000 --- a/vendor/github.com/pborman/uuid/json_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/json" - "reflect" - "testing" -) - -var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") - -func TestJSON(t *testing.T) { - type S struct { - ID1 UUID - ID2 UUID - } - s1 := S{ID1: testUUID} - data, err := json.Marshal(&s1) - if err != nil { - t.Fatal(err) - } - var s2 S - if err := json.Unmarshal(data, &s2); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(&s1, &s2) { - t.Errorf("got %#v, want %#v", s2, s1) - } -} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go new file mode 100644 index 000000000000..35b89352ad70 --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -0,0 +1,85 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" + + guuid "github.com/google/uuid" +) + +// MarshalText implements encoding.TextMarshaler. 
+func (u UUID) MarshalText() ([]byte, error) { + if len(u) != 16 { + return nil, nil + } + var js [36]byte + encodeHex(js[:], u) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *UUID) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") + } + *u = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u UUID) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + var id [16]byte + copy(id[:], data) + *u = id[:] + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (u Array) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], u[:]) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *Array) UnmarshalText(data []byte) error { + id, err := guuid.ParseBytes(data) + if err != nil { + return err + } + *u = Array(id) + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u Array) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *Array) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(u[:], data) + return nil +} diff --git a/vendor/github.com/pborman/uuid/marshal_test.go b/vendor/github.com/pborman/uuid/marshal_test.go new file mode 100644 index 000000000000..4e85b6bab9df --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal_test.go @@ -0,0 +1,124 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" +) + +var testUUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") +var testArray = testUUID.Array() + +func TestJSON(t *testing.T) { + type S struct { + ID1 UUID + ID2 UUID + } + s1 := S{ID1: testUUID} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} + +func TestJSONArray(t *testing.T) { + type S struct { + ID1 Array + ID2 Array + } + s1 := S{ID1: testArray} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} + +func TestMarshal(t *testing.T) { + data, err := testUUID.MarshalBinary() + if err != nil { + t.Fatalf("MarhsalBinary returned unexpected error %v", err) + } + if !bytes.Equal(data, testUUID) { + t.Fatalf("MarhsalBinary returns %x, want %x", data, testUUID) + } + var u UUID + u.UnmarshalBinary(data) + if !Equal(data, u) { + t.Fatalf("UnmarhsalBinary returns %v, want %v", u, testUUID) + } +} + +func TestMarshalArray(t *testing.T) { + data, err := testArray.MarshalBinary() + if err != nil { + t.Fatalf("MarhsalBinary returned unexpected error %v", err) + } + if !bytes.Equal(data, testUUID) { + t.Fatalf("MarhsalBinary returns %x, want %x", data, testUUID) + } + var a Array + a.UnmarshalBinary(data) + if a != testArray { + t.Fatalf("UnmarhsalBinary returns %v, want %v", a, testArray) + } +} + +func TestMarshalTextArray(t *testing.T) { + data, err := testArray.MarshalText() + if err != nil { + t.Fatalf("MarhsalText returned unexpected error %v", err) + } + var a Array + a.UnmarshalText(data) + if a != testArray { + 
t.Fatalf("UnmarhsalText returns %v, want %v", a, testArray) + } +} + +func BenchmarkUUID_MarshalJSON(b *testing.B) { + x := &struct { + UUID UUID `json:"uuid"` + }{} + x.UUID = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if x.UUID == nil { + b.Fatal("invalid uuid") + } + for i := 0; i < b.N; i++ { + js, err := json.Marshal(x) + if err != nil { + b.Fatalf("marshal json: %#v (%v)", js, err) + } + } +} + +func BenchmarkUUID_UnmarshalJSON(b *testing.B) { + js := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`) + var x *struct { + UUID UUID `json:"uuid"` + } + for i := 0; i < b.N; i++ { + err := json.Unmarshal(js, &x) + if err != nil { + b.Fatalf("marshal json: %#v (%v)", js, err) + } + } +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go old mode 100755 new mode 100644 index dd0a8ac189a3..e524e0101b47 --- a/vendor/github.com/pborman/uuid/node.go +++ b/vendor/github.com/pborman/uuid/node.go @@ -4,19 +4,15 @@ package uuid -import "net" - -var ( - interfaces []net.Interface // cached list of interfaces - ifname string // name of interface being used - nodeID []byte // hardware for version 1 UUIDs +import ( + guuid "github.com/google/uuid" ) // NodeInterface returns the name of the interface from which the NodeID was // derived. The interface "user" is returned if the NodeID was set by // SetNodeID. func NodeInterface() string { - return ifname + return guuid.NodeInterface() } // SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. @@ -26,67 +22,20 @@ func NodeInterface() string { // // SetNodeInterface never fails when name is "". 
func SetNodeInterface(name string) bool { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil && name != "" { - return false - } - } - - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - if setNodeID(ifs.HardwareAddr) { - ifname = ifs.Name - return true - } - } - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - if nodeID == nil { - nodeID = make([]byte, 6) - } - randomBits(nodeID) - return true - } - return false + return guuid.SetNodeInterface(name) } // NodeID returns a slice of a copy of the current Node ID, setting the Node ID // if not already set. func NodeID() []byte { - if nodeID == nil { - SetNodeInterface("") - } - nid := make([]byte, 6) - copy(nid, nodeID) - return nid + return guuid.NodeID() } // SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes // of id are used. If id is less than 6 bytes then false is returned and the // Node ID is not set. func SetNodeID(id []byte) bool { - if setNodeID(id) { - ifname = "user" - return true - } - return false -} - -func setNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - if nodeID == nil { - nodeID = make([]byte, 6) - } - copy(nodeID, id) - return true + return guuid.SetNodeID(id) } // NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 000000000000..929c3847e2a8 --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + parsed := make([]byte, 16) + copy(parsed, b) + *uuid = UUID(parsed) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/sql_test.go b/vendor/github.com/pborman/uuid/sql_test.go new file mode 100644 index 000000000000..103095156945 --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql_test.go @@ -0,0 +1,96 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "strings" + "testing" +) + +func TestScan(t *testing.T) { + var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479" + var byteTest []byte = Parse(stringTest) + var badTypeTest int = 6 + var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4" + + // sunny day tests + + var uuid UUID + err := (&uuid).Scan(stringTest) + if err != nil { + t.Fatal(err) + } + + err = (&uuid).Scan([]byte(stringTest)) + if err != nil { + t.Fatal(err) + } + + err = (&uuid).Scan(byteTest) + if err != nil { + t.Fatal(err) + } + + // bad type tests + + err = (&uuid).Scan(badTypeTest) + if err == nil { + t.Error("int correctly parsed and shouldn't have") + } + if !strings.Contains(err.Error(), "unable to scan type") { + t.Error("attempting to parse an int returned an incorrect error message") + } + + // invalid/incomplete uuids + + err = (&uuid).Scan(invalidTest) + if err == nil { + t.Error("invalid uuid was parsed without error") + } + if !strings.Contains(err.Error(), "invalid UUID") { + t.Error("attempting to parse an invalid UUID returned an incorrect error message") + } + + err = (&uuid).Scan(byteTest[:len(byteTest)-2]) + if err == nil { + t.Error("invalid byte uuid was parsed without error") + } + if !strings.Contains(err.Error(), "invalid UUID") { + t.Error("attempting to parse an invalid byte UUID returned an incorrect error message") + } + + // empty tests + + uuid = nil + var emptySlice []byte + err = (&uuid).Scan(emptySlice) + if err != nil { + t.Fatal(err) + } + + if uuid != nil { + t.Error("UUID was not nil after scanning empty byte slice") + } + + uuid = nil + var emptyString string + err = (&uuid).Scan(emptyString) + if err != nil { + t.Fatal(err) + } + + if uuid != nil { + t.Error("UUID was not nil after scanning empty string") + } +} + +func TestValue(t *testing.T) { + stringTest := "f47ac10b-58cc-0372-8567-0e02b2c3d479" + uuid := Parse(stringTest) + val, _ := uuid.Value() + if val != stringTest { + t.Error("Value() did not return 
expected string") + } +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go old mode 100755 new mode 100644 index 7ebc9bef1090..5c0960d8723a --- a/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/pborman/uuid/time.go @@ -6,65 +6,18 @@ package uuid import ( "encoding/binary" - "sync" - "time" + + guuid "github.com/google/uuid" ) // A Time represents a time as the number of 100's of nanoseconds since 15 Oct // 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - mu sync.Mutex - lasttime uint64 // last time we returned - clock_seq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} +type Time = guuid.Time // GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and // clock sequence as well as adjusting the clock sequence as needed. An error // is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer mu.Unlock() - mu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. 
- if clock_seq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clock_seq, nil -} +func GetTime() (Time, uint16, error) { return guuid.GetTime() } // ClockSequence returns the current clock sequence, generating one if not // already set. The clock sequence is only used for Version 1 UUIDs. @@ -74,39 +27,11 @@ func getTime() (Time, uint16, error) { // clock sequence is generated the first time a clock sequence is requested by // ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated // for -func ClockSequence() int { - defer mu.Unlock() - mu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clock_seq == 0 { - setClockSequence(-1) - } - return int(clock_seq & 0x3fff) -} +func ClockSequence() int { return guuid.ClockSequence() } // SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer mu.Unlock() - mu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - old_seq := clock_seq - clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if old_seq != clock_seq { - lasttime = 0 - } -} +func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in // uuid. It returns false if uuid is not valid. 
The time is only well defined diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go index de40b102c4be..255b5e24859d 100644 --- a/vendor/github.com/pborman/uuid/util.go +++ b/vendor/github.com/pborman/uuid/util.go @@ -4,19 +4,8 @@ package uuid -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - // xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = []byte{ +var xvalues = [256]byte{ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go old mode 100755 new mode 100644 index 2920fae63267..337000420729 --- a/vendor/github.com/pborman/uuid/uuid.go +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -7,28 +7,43 @@ package uuid import ( "bytes" "crypto/rand" - "fmt" + "encoding/hex" "io" - "strings" + + guuid "github.com/google/uuid" ) +// Array is a pass-by-value UUID that can be used as an effecient key in a map. +type Array [16]byte + +// UUID converts uuid into a slice. +func (uuid Array) UUID() UUID { + return uuid[:] +} + +// String returns the string representation of uuid, +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (uuid Array) String() string { + return guuid.UUID(uuid).String() +} + // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC // 4122. type UUID []byte // A Version represents a UUIDs version. -type Version byte +type Version = guuid.Version // A Variant represents a UUIDs variant. -type Variant byte +type Variant = guuid.Variant // Constants returned by Variant. 
const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. + Invalid = guuid.Invalid // Invalid UUID + RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 + Reserved = guuid.Reserved // Reserved, NCS backward compatibility. + Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future = guuid.Future // Reserved for future definition. ) var rander = rand.Reader // random function @@ -39,35 +54,23 @@ func New() string { return NewRandom().String() } -// Parse decodes s into a UUID or returns nil. Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for +// the formats parsed. func Parse(s string) UUID { - if len(s) == 36+9 { - if strings.ToLower(s[:9]) != "urn:uuid:" { - return nil - } - s = s[9:] - } else if len(s) != 36 { - return nil + gu, err := guuid.Parse(s) + if err == nil { + return gu[:] } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return nil - } - uuid := make([]byte, 16) - for i, x := range []int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - if v, ok := xtob(s[x:]); !ok { - return nil - } else { - uuid[i] = v - } + return nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + gu, err := guuid.ParseBytes(b) + if err == nil { + return gu[:], nil } - return uuid + return nil, err } // Equal returns true if uuid1 and uuid2 are equal. @@ -75,26 +78,50 @@ func Equal(uuid1, uuid2 UUID) bool { return bytes.Equal(uuid1, uuid2) } +// Array returns an array representation of uuid that can be used as a map key. 
+// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { - if uuid == nil || len(uuid) != 16 { + if len(uuid) != 16 { return "" } - b := []byte(uuid) - return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) } // URN returns the RFC 2141 URN form of uuid, // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. func (uuid UUID) URN() string { - if uuid == nil || len(uuid) != 16 { + if len(uuid) != 16 { return "" } - b := []byte(uuid) - return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) } // Variant returns the variant encoded in uuid. It returns Invalid if @@ -113,10 +140,9 @@ func (uuid UUID) Variant() Variant { default: return Reserved } - panic("unreachable") } -// Version returns the verison of uuid. It returns false if uuid is not +// Version returns the version of uuid. It returns false if uuid is not // valid. 
func (uuid UUID) Version() (Version, bool) { if len(uuid) != 16 { @@ -125,39 +151,12 @@ func (uuid UUID) Version() (Version, bool) { return Version(uuid[6] >> 4), true } -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implents io.Reader. +// SetRand sets the random number generator to r, which implements io.Reader. // If r.Read returns an error when the package requests random data then // a panic will be issued. // // Calling SetRand with nil sets the random number generator to the default // generator. func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r + guuid.SetRand(r) } diff --git a/vendor/github.com/pborman/uuid/uuid_test.go b/vendor/github.com/pborman/uuid/uuid_test.go old mode 100755 new mode 100644 index 417ebeb26aa1..7e9d144fba75 --- a/vendor/github.com/pborman/uuid/uuid_test.go +++ b/vendor/github.com/pborman/uuid/uuid_test.go @@ -4,13 +4,15 @@ package uuid +// Some of these tests can probably be removed as they are redundant with the +// tests in github.com/google/uuid. 
+ import ( "bytes" "fmt" "os" "strings" "testing" - "time" ) type test struct { @@ -112,7 +114,7 @@ func TestConstants(t *testing.T) { t.Errorf("%x: %v: not a stringer", x, v) } else if s := v.String(); s != tt.name { v, _ := tt.c.(int) - t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name) + t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name) } } } @@ -123,14 +125,14 @@ func TestRandomUUID(t *testing.T) { uuid := NewRandom() s := uuid.String() if m[s] { - t.Errorf("NewRandom returned duplicated UUID %s\n", s) + t.Errorf("NewRandom returned duplicated UUID %s", s) } m[s] = true if v, _ := uuid.Version(); v != 4 { - t.Errorf("Random UUID of version %s\n", v) + t.Errorf("Random UUID of version %s", v) } if uuid.Variant() != RFC4122 { - t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + t.Errorf("Random UUID is variant %d", uuid.Variant()) } } } @@ -140,68 +142,23 @@ func TestNew(t *testing.T) { for x := 1; x < 32; x++ { s := New() if m[s] { - t.Errorf("New returned duplicated UUID %s\n", s) + t.Errorf("New returned duplicated UUID %s", s) } m[s] = true uuid := Parse(s) if uuid == nil { - t.Errorf("New returned %q which does not decode\n", s) + t.Errorf("New returned %q which does not decode", s) continue } if v, _ := uuid.Version(); v != 4 { - t.Errorf("Random UUID of version %s\n", v) + t.Errorf("Random UUID of version %s", v) } if uuid.Variant() != RFC4122 { - t.Errorf("Random UUID is variant %d\n", uuid.Variant()) + t.Errorf("Random UUID is variant %d", uuid.Variant()) } } } -func clockSeq(t *testing.T, uuid UUID) int { - seq, ok := uuid.ClockSequence() - if !ok { - t.Fatalf("%s: invalid clock sequence\n", uuid) - } - return seq -} - -func TestClockSeq(t *testing.T) { - // Fake time.Now for this test to return a monotonically advancing time; restore it at end. 
- defer func(orig func() time.Time) { timeNow = orig }(timeNow) - monTime := time.Now() - timeNow = func() time.Time { - monTime = monTime.Add(1 * time.Second) - return monTime - } - - SetClockSequence(-1) - uuid1 := NewUUID() - uuid2 := NewUUID() - - if clockSeq(t, uuid1) != clockSeq(t, uuid2) { - t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2)) - } - - SetClockSequence(-1) - uuid2 = NewUUID() - - // Just on the very off chance we generated the same sequence - // two times we try again. - if clockSeq(t, uuid1) == clockSeq(t, uuid2) { - SetClockSequence(-1) - uuid2 = NewUUID() - } - if clockSeq(t, uuid1) == clockSeq(t, uuid2) { - t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1)) - } - - SetClockSequence(0x1234) - uuid1 = NewUUID() - if seq := clockSeq(t, uuid1); seq != 0x1234 { - t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq) - } -} - func TestCoding(t *testing.T) { text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" @@ -213,15 +170,15 @@ func TestCoding(t *testing.T) { 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, } if v := data.String(); v != text { - t.Errorf("%x: encoded to %s, expected %s\n", data, v, text) + t.Errorf("%x: encoded to %s, expected %s", data, v, text) } if v := data.URN(); v != urn { - t.Errorf("%x: urn is %s, expected %s\n", data, v, urn) + t.Errorf("%x: urn is %s, expected %s", data, v, urn) } uuid := Parse(text) if !Equal(uuid, data) { - t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data) + t.Errorf("%s: decoded to %s, expected %s", text, uuid, data) } } @@ -230,30 +187,30 @@ func TestVersion1(t *testing.T) { uuid2 := NewUUID() if Equal(uuid1, uuid2) { - t.Errorf("%s:duplicate uuid\n", uuid1) + t.Errorf("%s:duplicate uuid", uuid1) } if v, _ := uuid1.Version(); v != 1 { - t.Errorf("%s: version %s expected 1\n", uuid1, v) + t.Errorf("%s: version %s expected 1", uuid1, v) } if v, _ := uuid2.Version(); v != 1 { - t.Errorf("%s: version %s 
expected 1\n", uuid2, v) + t.Errorf("%s: version %s expected 1", uuid2, v) } n1 := uuid1.NodeID() n2 := uuid2.NodeID() if !bytes.Equal(n1, n2) { - t.Errorf("Different nodes %x != %x\n", n1, n2) + t.Errorf("Different nodes %x != %x", n1, n2) } t1, ok := uuid1.Time() if !ok { - t.Errorf("%s: invalid time\n", uuid1) + t.Errorf("%s: invalid time", uuid1) } t2, ok := uuid2.Time() if !ok { - t.Errorf("%s: invalid time\n", uuid2) + t.Errorf("%s: invalid time", uuid2) } q1, ok := uuid1.ClockSequence() if !ok { - t.Errorf("%s: invalid clock sequence\n", uuid1) + t.Errorf("%s: invalid clock sequence", uuid1) } q2, ok := uuid2.ClockSequence() if !ok { @@ -262,32 +219,11 @@ func TestVersion1(t *testing.T) { switch { case t1 == t2 && q1 == q2: - t.Errorf("time stopped\n") + t.Error("time stopped") case t1 > t2 && q1 == q2: - t.Errorf("time reversed\n") + t.Error("time reversed") case t1 < t2 && q1 != q2: - t.Errorf("clock sequence chaned unexpectedly\n") - } -} - -func TestNodeAndTime(t *testing.T) { - // Time is February 5, 1998 12:30:23.136364800 AM GMT - - uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") - node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} - - ts, ok := uuid.Time() - if ok { - c := time.Unix(ts.UnixTime()) - want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) - if !c.Equal(want) { - t.Errorf("Got time %v, want %v", c, want) - } - } else { - t.Errorf("%s: bad time\n", uuid) - } - if !bytes.Equal(node, uuid.NodeID()) { - t.Errorf("Expected node %v got %v\n", node, uuid.NodeID()) + t.Error("clock sequence chaned unexpectedly") } } @@ -295,7 +231,7 @@ func TestMD5(t *testing.T) { uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String() want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" if uuid != want { - t.Errorf("MD5: got %q expected %q\n", uuid, want) + t.Errorf("MD5: got %q expected %q", uuid, want) } } @@ -303,58 +239,31 @@ func TestSHA1(t *testing.T) { uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String() want := 
"886313e1-3b8a-5372-9b90-0c9aee199e5d" if uuid != want { - t.Errorf("SHA1: got %q expected %q\n", uuid, want) - } -} - -func TestNodeID(t *testing.T) { - nid := []byte{1, 2, 3, 4, 5, 6} - SetNodeInterface("") - s := NodeInterface() - if s == "" || s == "user" { - t.Errorf("NodeInterface %q after SetInteface\n", s) - } - node1 := NodeID() - if node1 == nil { - t.Errorf("NodeID nil after SetNodeInterface\n", s) - } - SetNodeID(nid) - s = NodeInterface() - if s != "user" { - t.Errorf("Expected NodeInterface %q got %q\n", "user", s) - } - node2 := NodeID() - if node2 == nil { - t.Errorf("NodeID nil after SetNodeID\n", s) - } - if bytes.Equal(node1, node2) { - t.Errorf("NodeID not changed after SetNodeID\n", s) - } else if !bytes.Equal(nid, node2) { - t.Errorf("NodeID is %x, expected %x\n", node2, nid) + t.Errorf("SHA1: got %q expected %q", uuid, want) } } func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) { if uuid == nil { - t.Errorf("%s failed\n", name) + t.Errorf("%s failed", name) return } if v, _ := uuid.Version(); v != 2 { - t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v) + t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v) return } if v, ok := uuid.Domain(); !ok || v != domain { if !ok { - t.Errorf("%s: %d: Domain failed\n", name, uuid) + t.Errorf("%s: %d: Domain failed", name, uuid) } else { - t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v) + t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v) } } if v, ok := uuid.Id(); !ok || v != id { if !ok { - t.Errorf("%s: %d: Id failed\n", name, uuid) + t.Errorf("%s: %d: Id failed", name, uuid) } else { - t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v) + t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v) } } } @@ -379,12 +288,123 @@ func TestBadRand(t *testing.T) { uuid1 := New() uuid2 := New() if uuid1 != uuid2 { - t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2) + t.Errorf("expected 
duplicates, got %q and %q", uuid1, uuid2) } SetRand(nil) uuid1 = New() uuid2 = New() if uuid1 == uuid2 { - t.Errorf("unexecpted duplicates, got %q\n", uuid1) + t.Errorf("unexpected duplicates, got %q", uuid1) + } +} + +func TestUUID_Array(t *testing.T) { + expect := Array{ + 0xf4, 0x7a, 0xc1, 0x0b, + 0x58, 0xcc, + 0x03, 0x72, + 0x85, 0x67, + 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, + } + uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if uuid == nil { + t.Fatal("invalid uuid") + } + if uuid.Array() != expect { + t.Fatal("invalid array") + } +} + +func TestArray_UUID(t *testing.T) { + array := Array{ + 0xf4, 0x7a, 0xc1, 0x0b, + 0x58, 0xcc, + 0x03, 0x72, + 0x85, 0x67, + 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, + } + expect := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if expect == nil { + t.Fatal("invalid uuid") + } + if !bytes.Equal(array.UUID(), expect) { + t.Fatal("invalid uuid") + } +} + +func BenchmarkParse(b *testing.B) { + for i := 0; i < b.N; i++ { + uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if uuid == nil { + b.Fatal("invalid uuid") + } + } +} + +func BenchmarkNew(b *testing.B) { + for i := 0; i < b.N; i++ { + New() + } +} + +func BenchmarkUUID_String(b *testing.B) { + uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if uuid == nil { + b.Fatal("invalid uuid") + } + for i := 0; i < b.N; i++ { + if uuid.String() == "" { + b.Fatal("invalid uuid") + } + } +} + +func BenchmarkUUID_URN(b *testing.B) { + uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if uuid == nil { + b.Fatal("invalid uuid") + } + for i := 0; i < b.N; i++ { + if uuid.URN() == "" { + b.Fatal("invalid uuid") + } + } +} + +func BenchmarkUUID_Array(b *testing.B) { + expect := Array{ + 0xf4, 0x7a, 0xc1, 0x0b, + 0x58, 0xcc, + 0x03, 0x72, + 0x85, 0x67, + 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, + } + uuid := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if uuid == nil { + b.Fatal("invalid uuid") + } + for i := 0; i < b.N; i++ { + if uuid.Array() != expect { + 
b.Fatal("invalid array") + } + } +} + +func BenchmarkArray_UUID(b *testing.B) { + array := Array{ + 0xf4, 0x7a, 0xc1, 0x0b, + 0x58, 0xcc, + 0x03, 0x72, + 0x85, 0x67, + 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79, + } + expect := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if expect == nil { + b.Fatal("invalid uuid") + } + for i := 0; i < b.N; i++ { + if !bytes.Equal(array.UUID(), expect) { + b.Fatal("invalid uuid") + } } } diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go index 0127eacfab8a..7af948da793b 100644 --- a/vendor/github.com/pborman/uuid/version1.go +++ b/vendor/github.com/pborman/uuid/version1.go @@ -5,7 +5,7 @@ package uuid import ( - "encoding/binary" + guuid "github.com/google/uuid" ) // NewUUID returns a Version 1 UUID based on the current NodeID and clock @@ -15,27 +15,9 @@ import ( // SetClockSequence then it will be set automatically. If GetTime fails to // return the current NewUUID returns nil. func NewUUID() UUID { - if nodeID == nil { - SetNodeInterface("") + gu, err := guuid.NewUUID() + if err == nil { + return UUID(gu[:]) } - - now, seq, err := GetTime() - if err != nil { - return nil - } - - uuid := make([]byte, 16) - - time_low := uint32(now & 0xffffffff) - time_mid := uint16((now >> 32) & 0xffff) - time_hi := uint16((now >> 48) & 0x0fff) - time_hi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], time_low) - binary.BigEndian.PutUint16(uuid[4:], time_mid) - binary.BigEndian.PutUint16(uuid[6:], time_hi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID) - - return uuid + return nil } diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go index b3d4a368dd0e..b459d46d13dc 100644 --- a/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/pborman/uuid/version4.go @@ -4,12 +4,14 @@ package uuid +import guuid "github.com/google/uuid" + // Random returns a Random (Version 4) UUID or panics. 
// // The strength of the UUIDs is based on the strength of the crypto/rand // package. // -// A note about uniqueness derived from from the UUID Wikipedia entry: +// A note about uniqueness derived from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being // hit by a meteorite is estimated to be one chance in 17 billion, that @@ -17,9 +19,8 @@ package uuid // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() UUID { - uuid := make([]byte, 16) - randomBits([]byte(uuid)) - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid + if gu, err := guuid.NewRandom(); err == nil { + return UUID(gu[:]) + } + return nil } diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go index 3b4afedf1a30..26548b63cef4 100644 --- a/vendor/gopkg.in/inf.v0/dec.go +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -104,7 +104,7 @@ var bigInt = [...]*big.Int{ var exp10cache [64]big.Int = func() [64]big.Int { e10, e10i := [64]big.Int{}, bigInt[1] - for i, _ := range e10 { + for i := range e10 { e10[i].Set(e10i) e10i = new(big.Int).Mul(e10i, bigInt[10]) } diff --git a/vendor/gopkg.in/inf.v0/rounder_example_test.go b/vendor/gopkg.in/inf.v0/rounder_example_test.go index 803c1d7ee56c..4bf36af95342 100644 --- a/vendor/gopkg.in/inf.v0/rounder_example_test.go +++ b/vendor/gopkg.in/inf.v0/rounder_example_test.go @@ -31,7 +31,8 @@ func ExampleRounder() { {"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact}, } - fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n") + fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):") + fmt.Println() w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight) fmt.Fprint(w, "x\ts\t|\t") for _, r := range rounders { diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md 
b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md index 9f8e698c0bcf..9e97154535d8 100644 --- a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md @@ -87,6 +87,14 @@ the current file is closed, renamed, and a new log file created with the original name. Thus, the filename you give Logger is always the "current" log file. +Backups use the log file name given to Logger, in the form `name-timestamp.ext` +where name is the filename without the extension, timestamp is the time at which +the log was rotated formatted with the time.Time format of +`2006-01-02T15-04-05.000` and the extension is the original extension. For +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created +at 6:30pm on Nov 11 2016 would use the filename +`/var/log/foo/server-2016-11-04T18-30-00.000.log` + ### Cleaning Up Old Log Files Whenever a new logfile gets created, old log files may be deleted. The most recent files according to the encoded timestamp will be retained, up to a diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/linux_test.go b/vendor/gopkg.in/natefinch/lumberjack.v2/linux_test.go index 40f3446685c1..2bd16849b0f9 100644 --- a/vendor/gopkg.in/natefinch/lumberjack.v2/linux_test.go +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/linux_test.go @@ -6,6 +6,7 @@ import ( "os" "syscall" "testing" + "time" ) func TestMaintainMode(t *testing.T) { @@ -15,7 +16,7 @@ func TestMaintainMode(t *testing.T) { filename := logFile(dir) - mode := os.FileMode(0770) + mode := os.FileMode(0600) f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode) isNil(err, t) f.Close() @@ -46,9 +47,9 @@ func TestMaintainMode(t *testing.T) { } func TestMaintainOwner(t *testing.T) { - fakeC := fakeChown{} - os_Chown = fakeC.Set - os_Stat = fakeStat + fakeFS := newFakeFS() + os_Chown = fakeFS.Chown + os_Stat = fakeFS.Stat defer func() { os_Chown = os.Chown os_Stat = os.Stat @@ -59,7 +60,95 @@ func TestMaintainOwner(t *testing.T) { 
filename := logFile(dir) + f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644) + isNil(err, t) + f.Close() + + l := &Logger{ + Filename: filename, + MaxBackups: 1, + MaxSize: 100, // megabytes + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + equals(555, fakeFS.files[filename].uid, t) + equals(666, fakeFS.files[filename].gid, t) +} + +func TestCompressMaintainMode(t *testing.T) { + currentTime = fakeTime + + dir := makeTempDir("TestCompressMaintainMode", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + + mode := os.FileMode(0600) + f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode) + isNil(err, t) + f.Close() + + l := &Logger{ + Compress: true, + Filename: filename, + MaxBackups: 1, + MaxSize: 100, // megabytes + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + // we need to wait a little bit since the files get compressed on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // a compressed version of the log file should now exist with the correct + // mode. 
+ filename2 := backupFile(dir) + info, err := os.Stat(filename) + isNil(err, t) + info2, err := os.Stat(filename2+compressSuffix) + isNil(err, t) + equals(mode, info.Mode(), t) + equals(mode, info2.Mode(), t) +} + +func TestCompressMaintainOwner(t *testing.T) { + fakeFS := newFakeFS() + os_Chown = fakeFS.Chown + os_Stat = fakeFS.Stat + defer func() { + os_Chown = os.Chown + os_Stat = os.Stat + }() + currentTime = fakeTime + dir := makeTempDir("TestCompressMaintainOwner", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + + f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644) + isNil(err, t) + f.Close() + l := &Logger{ + Compress: true, Filename: filename, MaxBackups: 1, MaxSize: 100, // megabytes @@ -75,27 +164,39 @@ func TestMaintainOwner(t *testing.T) { err = l.Rotate() isNil(err, t) - equals(555, fakeC.uid, t) - equals(666, fakeC.gid, t) + // we need to wait a little bit since the files get compressed on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // a compressed version of the log file should now exist with the correct + // owner. 
+ filename2 := backupFile(dir) + equals(555, fakeFS.files[filename2+compressSuffix].uid, t) + equals(666, fakeFS.files[filename2+compressSuffix].gid, t) +} + +type fakeFile struct { + uid int + gid int +} + +type fakeFS struct { + files map[string]fakeFile } -type fakeChown struct { - name string - uid int - gid int +func newFakeFS() *fakeFS { + return &fakeFS{files: make(map[string]fakeFile)} } -func (f *fakeChown) Set(name string, uid, gid int) error { - f.name = name - f.uid = uid - f.gid = gid +func (fs *fakeFS) Chown(name string, uid, gid int) error { + fs.files[name] = fakeFile{uid: uid, gid: gid} return nil } -func fakeStat(name string) (os.FileInfo, error) { +func (fs *fakeFS) Stat(name string) (os.FileInfo, error) { info, err := os.Stat(name) if err != nil { - return info, err + return nil, err } stat := info.Sys().(*syscall.Stat_t) stat.Uid = 555 diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go index 701444411e32..ca19da440828 100644 --- a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -22,6 +22,8 @@ package lumberjack import ( + "compress/gzip" + "errors" "fmt" "io" "io/ioutil" @@ -35,6 +37,7 @@ import ( const ( backupTimeFormat = "2006-01-02T15-04-05.000" + compressSuffix = ".gz" defaultMaxSize = 100 ) @@ -55,6 +58,14 @@ var _ io.WriteCloser = (*Logger)(nil) // original name. Thus, the filename you give Logger is always the "current" log // file. // +// Backups use the log file name given to Logger, in the form +// `name-timestamp.ext` where name is the filename without the extension, +// timestamp is the time at which the log was rotated formatted with the +// time.Time format of `2006-01-02T15-04-05.000` and the extension is the +// original extension. 
For example, if your Logger.Filename is +// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would +// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log` +// // Cleaning Up Old Log Files // // Whenever a new logfile gets created, old log files may be deleted. The most @@ -92,9 +103,16 @@ type Logger struct { // time. LocalTime bool `json:"localtime" yaml:"localtime"` + // Compress determines if the rotated log files should be compressed + // using gzip. + Compress bool `json:"compress" yaml:"compress"` + size int64 file *os.File mu sync.Mutex + + millCh chan bool + startMill sync.Once } var ( @@ -163,8 +181,8 @@ func (l *Logger) close() error { // Rotate causes Logger to close the existing log file and immediately create a // new one. This is a helper function for applications that want to initiate // rotations outside of the normal rotation rules, such as in response to -// SIGHUP. After rotating, this initiates a cleanup of old log files according -// to the normal rules. +// SIGHUP. After rotating, this initiates compression and removal of old log +// files according to the configuration. func (l *Logger) Rotate() error { l.mu.Lock() defer l.mu.Unlock() @@ -173,16 +191,16 @@ func (l *Logger) Rotate() error { // rotate closes the current file, moves it aside with a timestamp in the name, // (if it exists), opens a new file with the original filename, and then runs -// cleanup. +// post-rotation processing and removal. func (l *Logger) rotate() error { if err := l.close(); err != nil { return err } - if err := l.openNew(); err != nil { return err } - return l.cleanup() + l.mill() + return nil } // openNew opens a new log file for writing, moving any old log file out of the @@ -244,6 +262,8 @@ func backupName(name string, local bool) string { // would not put it over MaxSize. If there is no such file or the write would // put it over the MaxSize, a new file is created. 
func (l *Logger) openExistingOrNew(writeLen int) error { + l.mill() + filename := l.filename() info, err := os_Stat(filename) if os.IsNotExist(err) { @@ -277,10 +297,12 @@ func (l *Logger) filename() string { return filepath.Join(os.TempDir(), name) } -// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as +// millRunOnce performs compression and removal of stale log files. +// Log files are compressed if enabled via configuration and old log +// files are removed, keeping at most l.MaxBackups files, as long as // none of them are older than MaxAge. -func (l *Logger) cleanup() error { - if l.MaxBackups == 0 && l.MaxAge == 0 { +func (l *Logger) millRunOnce() error { + if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress { return nil } @@ -289,38 +311,87 @@ func (l *Logger) cleanup() error { return err } - var deletes []logInfo + var compress, remove []logInfo if l.MaxBackups > 0 && l.MaxBackups < len(files) { - deletes = files[l.MaxBackups:] - files = files[:l.MaxBackups] + preserved := make(map[string]bool) + var remaining []logInfo + for _, f := range files { + // Only count the uncompressed log file or the + // compressed log file, not both. 
+ fn := f.Name() + if strings.HasSuffix(fn, compressSuffix) { + fn = fn[:len(fn)-len(compressSuffix)] + } + preserved[fn] = true + + if len(preserved) > l.MaxBackups { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining } if l.MaxAge > 0 { diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) - cutoff := currentTime().Add(-1 * diff) + var remaining []logInfo for _, f := range files { if f.timestamp.Before(cutoff) { - deletes = append(deletes, f) + remove = append(remove, f) + } else { + remaining = append(remaining, f) } } + files = remaining } - if len(deletes) == 0 { - return nil + if l.Compress { + for _, f := range files { + if !strings.HasSuffix(f.Name(), compressSuffix) { + compress = append(compress, f) + } + } } - go deleteAll(l.dir(), deletes) + for _, f := range remove { + errRemove := os.Remove(filepath.Join(l.dir(), f.Name())) + if err == nil && errRemove != nil { + err = errRemove + } + } + for _, f := range compress { + fn := filepath.Join(l.dir(), f.Name()) + errCompress := compressLogFile(fn, fn+compressSuffix) + if err == nil && errCompress != nil { + err = errCompress + } + } - return nil + return err } -func deleteAll(dir string, files []logInfo) { - // remove files on a separate goroutine - for _, f := range files { +// millRun runs in a goroutine to manage post-rotation compression and removal +// of old log files. +func (l *Logger) millRun() { + for _ = range l.millCh { // what am I going to do, log this? - _ = os.Remove(filepath.Join(dir, f.Name())) + _ = l.millRunOnce() + } +} + +// mill performs post-rotation compression and removal of stale log files, +// starting the mill goroutine if necessary. 
+func (l *Logger) mill() { + l.startMill.Do(func() { + l.millCh = make(chan bool, 1) + go l.millRun() + }) + select { + case l.millCh <- true: + default: } } @@ -339,13 +410,13 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) { if f.IsDir() { continue } - name := l.timeFromName(f.Name(), prefix, ext) - if name == "" { + if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil { + logFiles = append(logFiles, logInfo{t, f}) continue } - t, err := time.Parse(backupTimeFormat, name) - if err == nil { + if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil { logFiles = append(logFiles, logInfo{t, f}) + continue } // error parsing means that the suffix at the end was not generated // by lumberjack, and therefore it's not a backup file. @@ -359,17 +430,15 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) { // timeFromName extracts the formatted time from the filename by stripping off // the filename's prefix and extension. This prevents someone's filename from // confusing time.parse. -func (l *Logger) timeFromName(filename, prefix, ext string) string { +func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) { if !strings.HasPrefix(filename, prefix) { - return "" + return time.Time{}, errors.New("mismatched prefix") } - filename = filename[len(prefix):] - if !strings.HasSuffix(filename, ext) { - return "" + return time.Time{}, errors.New("mismatched extension") } - filename = filename[:len(filename)-len(ext)] - return filename + ts := filename[len(prefix) : len(filename)-len(ext)] + return time.Parse(backupTimeFormat, ts) } // max returns the maximum size in bytes of log files before rolling. @@ -394,6 +463,61 @@ func (l *Logger) prefixAndExt() (prefix, ext string) { return prefix, ext } +// compressLogFile compresses the given log file, removing the +// uncompressed log file if successful. 
+func compressLogFile(src, dst string) (err error) { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("failed to open log file: %v", err) + } + defer f.Close() + + fi, err := os_Stat(src) + if err != nil { + return fmt.Errorf("failed to stat log file: %v", err) + } + + if err := chown(dst, fi); err != nil { + return fmt.Errorf("failed to chown compressed log file: %v", err) + } + + // If this file already exists, we presume it was created by + // a previous attempt to compress the log file. + gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + // logInfo is a convenience struct to return the filename and its embedded // timestamp. type logInfo struct { diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go index c11dc1872ffb..58e165f0276b 100644 --- a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go @@ -1,6 +1,8 @@ package lumberjack import ( + "bytes" + "compress/gzip" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +12,7 @@ import ( "time" "github.com/BurntSushi/toml" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" ) // !!!NOTE!!! 
@@ -41,7 +43,7 @@ func TestNewFile(t *testing.T) { n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) - existsWithLen(logFile(dir), n, t) + existsWithContent(logFile(dir), b, t) fileCount(dir, 1, t) } @@ -54,7 +56,7 @@ func TestOpenExisting(t *testing.T) { data := []byte("foo!") err := ioutil.WriteFile(filename, data, 0644) isNil(err, t) - existsWithLen(filename, len(data), t) + existsWithContent(filename, data, t) l := &Logger{ Filename: filename, @@ -66,7 +68,7 @@ func TestOpenExisting(t *testing.T) { equals(len(b), n, t) // make sure the file got appended - existsWithLen(filename, len(data)+n, t) + existsWithContent(filename, append(data, b...), t) // make sure no other files were created fileCount(dir, 1, t) @@ -106,7 +108,7 @@ func TestMakeLogDir(t *testing.T) { n, err := l.Write(b) isNil(err, t) equals(len(b), n, t) - existsWithLen(logFile(dir), n, t) + existsWithContent(logFile(dir), b, t) fileCount(dir, 1, t) } @@ -122,7 +124,7 @@ func TestDefaultFilename(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b, t) } func TestAutoRotate(t *testing.T) { @@ -143,7 +145,7 @@ func TestAutoRotate(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b, t) fileCount(dir, 1, t) newFakeTime() @@ -155,10 +157,10 @@ func TestAutoRotate(t *testing.T) { // the old logfile should be moved aside and the main logfile should have // only the last write in it. - existsWithLen(filename, n, t) + existsWithContent(filename, b2, t) // the backup file will use the current fake time and have the old contents. 
- existsWithLen(backupFile(dir), len(b), t) + existsWithContent(backupFile(dir), b, t) fileCount(dir, 2, t) } @@ -188,8 +190,8 @@ func TestFirstWriteRotate(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) - existsWithLen(backupFile(dir), len(start), t) + existsWithContent(filename, b, t) + existsWithContent(backupFile(dir), start, t) fileCount(dir, 2, t) } @@ -212,7 +214,7 @@ func TestMaxBackups(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b, t) fileCount(dir, 1, t) newFakeTime() @@ -225,25 +227,26 @@ func TestMaxBackups(t *testing.T) { // this will use the new fake time secondFilename := backupFile(dir) - existsWithLen(secondFilename, len(b), t) + existsWithContent(secondFilename, b, t) - // make sure the old file still exists with the same size. - existsWithLen(filename, n, t) + // make sure the old file still exists with the same content. + existsWithContent(filename, b2, t) fileCount(dir, 2, t) newFakeTime() // this will make us rotate again - n, err = l.Write(b2) + b3 := []byte("baaaaaar!") + n, err = l.Write(b3) isNil(err, t) - equals(len(b2), n, t) + equals(len(b3), n, t) // this will use the new fake time thirdFilename := backupFile(dir) - existsWithLen(thirdFilename, len(b2), t) + existsWithContent(thirdFilename, b2, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b3, t) // we need to wait a little bit since the files get deleted on a different // goroutine. 
@@ -253,7 +256,7 @@ func TestMaxBackups(t *testing.T) { fileCount(dir, 2, t) // second file name should still exist - existsWithLen(thirdFilename, len(b2), t) + existsWithContent(thirdFilename, b2, t) // should have deleted the first backup notExist(secondFilename, t) @@ -276,14 +279,24 @@ func TestMaxBackups(t *testing.T) { newFakeTime() + // this will use the new fake time + fourthFilename := backupFile(dir) + + // Create a log file that is/was being compressed - this should + // not be counted since both the compressed and the uncompressed + // log files still exist. + compLogFile := fourthFilename+compressSuffix + err = ioutil.WriteFile(compLogFile, []byte("compress"), 0644) + isNil(err, t) + // this will make us rotate again - n, err = l.Write(b2) + b4 := []byte("baaaaaaz!") + n, err = l.Write(b4) isNil(err, t) - equals(len(b2), n, t) + equals(len(b4), n, t) - // this will use the new fake time - fourthFilename := backupFile(dir) - existsWithLen(fourthFilename, len(b2), t) + existsWithContent(fourthFilename, b3, t) + existsWithContent(fourthFilename+compressSuffix, []byte("compress"), t) // we need to wait a little bit since the files get deleted on a different // goroutine. 
@@ -291,12 +304,12 @@ func TestMaxBackups(t *testing.T) { // We should have four things in the directory now - the 2 log files, the // not log file, and the directory - fileCount(dir, 4, t) + fileCount(dir, 5, t) // third file name should still exist - existsWithLen(filename, n, t) + existsWithContent(filename, b4, t) - existsWithLen(fourthFilename, len(b2), t) + existsWithContent(fourthFilename, b3, t) // should have deleted the first filename notExist(thirdFilename, t) @@ -328,7 +341,7 @@ func TestCleanupExistingBackups(t *testing.T) { newFakeTime() backup = backupFile(dir) - err = ioutil.WriteFile(backup, data, 0644) + err = ioutil.WriteFile(backup+compressSuffix, data, 0644) isNil(err, t) newFakeTime() @@ -383,7 +396,7 @@ func TestMaxAge(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b, t) fileCount(dir, 1, t) // two days later @@ -393,7 +406,7 @@ func TestMaxAge(t *testing.T) { n, err = l.Write(b2) isNil(err, t) equals(len(b2), n, t) - existsWithLen(backupFile(dir), len(b), t) + existsWithContent(backupFile(dir), b, t) // we need to wait a little bit since the files get deleted on a different // goroutine. @@ -403,19 +416,19 @@ func TestMaxAge(t *testing.T) { // created. fileCount(dir, 2, t) - existsWithLen(filename, len(b2), t) + existsWithContent(filename, b2, t) // we should have deleted the old file due to being too old - existsWithLen(backupFile(dir), len(b), t) + existsWithContent(backupFile(dir), b, t) // two days later newFakeTime() - b3 := []byte("foooooo!") - n, err = l.Write(b2) + b3 := []byte("baaaaar!") + n, err = l.Write(b3) isNil(err, t) equals(len(b3), n, t) - existsWithLen(backupFile(dir), len(b2), t) + existsWithContent(backupFile(dir), b2, t) // we need to wait a little bit since the files get deleted on a different // goroutine. @@ -425,11 +438,10 @@ func TestMaxAge(t *testing.T) { // backup. The earlier backup is past the cutoff and should be gone. 
fileCount(dir, 2, t) - existsWithLen(filename, len(b3), t) + existsWithContent(filename, b3, t) // we should have deleted the old file due to being too old - existsWithLen(backupFile(dir), len(b2), t) - + existsWithContent(backupFile(dir), b2, t) } func TestOldLogFiles(t *testing.T) { @@ -475,17 +487,23 @@ func TestOldLogFiles(t *testing.T) { func TestTimeFromName(t *testing.T) { l := &Logger{Filename: "/var/log/myfoo/foo.log"} prefix, ext := l.prefixAndExt() - val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext) - equals("2014-05-04T14-44-33.555", val, t) - - val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext) - equals("", val, t) - val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext) - equals("", val, t) + tests := []struct { + filename string + want time.Time + wantErr bool + }{ + {"foo-2014-05-04T14-44-33.555.log", time.Date(2014, 5, 4, 14, 44, 33, 555000000, time.UTC), false}, + {"foo-2014-05-04T14-44-33.555", time.Time{}, true}, + {"2014-05-04T14-44-33.555.log", time.Time{}, true}, + {"foo.log", time.Time{}, true}, + } - val = l.timeFromName("foo.log", prefix, ext) - equals("", val, t) + for _, test := range tests { + got, err := l.timeFromName(test.filename, prefix, ext) + equals(got, test.want, t) + equals(err != nil, test.wantErr, t) + } } func TestLocalTime(t *testing.T) { @@ -511,8 +529,8 @@ func TestLocalTime(t *testing.T) { isNil(err, t) equals(len(b2), n2, t) - existsWithLen(logFile(dir), n2, t) - existsWithLen(backupFileLocal(dir), n, t) + existsWithContent(logFile(dir), b2, t) + existsWithContent(backupFileLocal(dir), b, t) } func TestRotate(t *testing.T) { @@ -533,7 +551,7 @@ func TestRotate(t *testing.T) { isNil(err, t) equals(len(b), n, t) - existsWithLen(filename, n, t) + existsWithContent(filename, b, t) fileCount(dir, 1, t) newFakeTime() @@ -546,8 +564,8 @@ func TestRotate(t *testing.T) { <-time.After(10 * time.Millisecond) filename2 := backupFile(dir) - existsWithLen(filename2, n, t) - 
existsWithLen(filename, 0, t) + existsWithContent(filename2, b, t) + existsWithContent(filename, []byte{}, t) fileCount(dir, 2, t) newFakeTime() @@ -559,8 +577,8 @@ func TestRotate(t *testing.T) { <-time.After(10 * time.Millisecond) filename3 := backupFile(dir) - existsWithLen(filename3, 0, t) - existsWithLen(filename, 0, t) + existsWithContent(filename3, []byte{}, t) + existsWithContent(filename, []byte{}, t) fileCount(dir, 2, t) b2 := []byte("foooooo!") @@ -569,7 +587,105 @@ func TestRotate(t *testing.T) { equals(len(b2), n, t) // this will use the new fake time - existsWithLen(filename, n, t) + existsWithContent(filename, b2, t) +} + +func TestCompressOnRotate(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestCompressOnRotate", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Compress: true, + Filename: filename, + MaxSize: 10, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + existsWithContent(filename, b, t) + fileCount(dir, 1, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + // the old logfile should be moved aside and the main logfile should have + // nothing in it. + existsWithContent(filename, []byte{}, t) + + // we need to wait a little bit since the files get compressed on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // a compressed version of the log file should now exist and the original + // should have been removed. 
+ bc := new(bytes.Buffer) + gz := gzip.NewWriter(bc) + _, err = gz.Write(b) + isNil(err, t) + err = gz.Close() + isNil(err, t) + existsWithContent(backupFile(dir)+compressSuffix, bc.Bytes(), t) + notExist(backupFile(dir), t) + + fileCount(dir, 2, t) +} + +func TestCompressOnResume(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestCompressOnResume", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Compress: true, + Filename: filename, + MaxSize: 10, + } + defer l.Close() + + // Create a backup file and empty "compressed" file. + filename2 := backupFile(dir) + b := []byte("foo!") + err := ioutil.WriteFile(filename2, b, 0644) + isNil(err, t) + err = ioutil.WriteFile(filename2+compressSuffix, []byte{}, 0644) + isNil(err, t) + + newFakeTime() + + b2 := []byte("boo!") + n, err := l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + existsWithContent(filename, b2, t) + + // we need to wait a little bit since the files get compressed on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // The write should have started the compression - a compressed version of + // the log file should now exist and the original should have been removed. 
+ bc := new(bytes.Buffer) + gz := gzip.NewWriter(bc) + _, err = gz.Write(b) + isNil(err, t) + err = gz.Close() + isNil(err, t) + existsWithContent(filename2+compressSuffix, bc.Bytes(), t) + notExist(filename2, t) + + fileCount(dir, 2, t) } func TestJson(t *testing.T) { @@ -579,7 +695,8 @@ func TestJson(t *testing.T) { "maxsize": 5, "maxage": 10, "maxbackups": 3, - "localtime": true + "localtime": true, + "compress": true }`[1:]) l := Logger{} @@ -590,6 +707,7 @@ func TestJson(t *testing.T) { equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) + equals(true, l.Compress, t) } func TestYaml(t *testing.T) { @@ -598,7 +716,8 @@ filename: foo maxsize: 5 maxage: 10 maxbackups: 3 -localtime: true`[1:]) +localtime: true +compress: true`[1:]) l := Logger{} err := yaml.Unmarshal(data, &l) @@ -608,6 +727,7 @@ localtime: true`[1:]) equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) + equals(true, l.Compress, t) } func TestToml(t *testing.T) { @@ -616,7 +736,8 @@ filename = "foo" maxsize = 5 maxage = 10 maxbackups = 3 -localtime = true`[1:] +localtime = true +compress = true`[1:] l := Logger{} md, err := toml.Decode(data, &l) @@ -626,6 +747,7 @@ localtime = true`[1:] equals(10, l.MaxAge, t) equals(3, l.MaxBackups, t) equals(true, l.LocalTime, t) + equals(true, l.Compress, t) equals(0, len(md.Undecoded()), t) } @@ -635,15 +757,19 @@ localtime = true`[1:] func makeTempDir(name string, t testing.TB) string { dir := time.Now().Format(name + backupTimeFormat) dir = filepath.Join(os.TempDir(), dir) - isNilUp(os.Mkdir(dir, 0777), t, 1) + isNilUp(os.Mkdir(dir, 0700), t, 1) return dir } -// existsWithLen checks that the given file exists and has the correct length. -func existsWithLen(path string, length int, t testing.TB) { +// existsWithContent checks that the given file exists and has the correct content. 
+func existsWithContent(path string, content []byte, t testing.TB) { info, err := os.Stat(path) isNilUp(err, t, 1) - equalsUp(int64(length), info.Size(), t, 1) + equalsUp(int64(len(content)), info.Size(), t, 1) + + b, err := ioutil.ReadFile(path) + isNilUp(err, t, 1) + equalsUp(content, b, t, 1) } // logFile returns the log file name in the given directory for the current fake diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/rotate_test.go b/vendor/gopkg.in/natefinch/lumberjack.v2/rotate_test.go index 0561464ac0d0..4bd4325da862 100644 --- a/vendor/gopkg.in/natefinch/lumberjack.v2/rotate_test.go +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/rotate_test.go @@ -8,7 +8,7 @@ import ( "os/signal" "syscall" - "github.com/natefinch/lumberjack" + "gopkg.in/natefinch/lumberjack.v2" ) // Example of how to rotate in response to SIGHUP. diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml index 03ddc7318ae6..01a310789387 100644 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,7 +1,6 @@ language: go dist: xenial go: - - 1.9.x - 1.10.x - 1.11.x script: diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 11ad7ce1a40b..325b40b0763f 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - dims - lavalamp