From be1352ba71a1537bec2278cef90f2e5e2a0a29a9 Mon Sep 17 00:00:00 2001
From: Maciej Szulik
Date: Tue, 2 Apr 2019 15:07:30 +0200
Subject: [PATCH 1/4] Add openshift/library-go dependency

Adding openshift/library-go requires bumping k8s.io/apiextensions-apiserver
and setting the sigs.k8s.io/controller-tools override to point to openshift's
fork. Additionally, I'm setting prune.project for library-go so that this
project does not get removed by dep. We are not explicitly using library-go
in the code, other than for the generation part in the Makefile.
---
 Gopkg.toml | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/Gopkg.toml b/Gopkg.toml
index 14e828d2a6..128f36d1dd 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -1,4 +1,4 @@
-required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types" ]
+required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types", "github.com/openshift/library-go/cmd/crd-schema-gen" ]

 [prune]
   non-go = true
@@ -15,6 +15,11 @@ required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types" ]
     non-go = false
     unused-packages = false

+  [[prune.project]]
+    name = "github.com/openshift/library-go"
+    non-go = false
+    unused-packages = false
+
 [[constraint]]
   name = "github.com/golang/glog"
   revision = "3c92600d7533018d216b534fe894ad60a1e6d5bf"
@@ -32,11 +37,11 @@ required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types" ]
   name = "k8s.io/client-go"
   version = "kubernetes-1.13.4"

-[[constraint]]
+[[override]]
   name = "k8s.io/apiextensions-apiserver"
   # use origin fork with nullable support.
   # TODO(sttts, sig-master): switch back to upstream version in 1.14
-  branch = "origin-4.0-kubernetes-1.12.4"
+  branch = "origin-4.1-kubernetes-1.13.4"
   source = "https://github.com/openshift/kubernetes-apiextensions-apiserver"

 [[constraint]]
@@ -70,3 +75,14 @@ required = [ "k8s.io/code-generator/cmd/client-gen", "k8s.io/gengo/types" ]
 [[constraint]]
   name = "github.com/openshift/client-go"
   branch = "master"
+
+[[override]]
+  name = "github.com/openshift/library-go"
+  branch = "master"
+
+[[override]]
+  name = "sigs.k8s.io/controller-tools"
+  # use origin fork with nullable support.
+ # TODO(soltysh, sig-master): switch back to upstream version in 1.14 + branch = "origin-4.1-kubernetes-1.13.4" + source = "https://github.com/openshift/kubernetes-sigs-controller-tools" From d6738b3cf002b781a46c11d5f24cde57034daa52 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 26 Apr 2019 22:41:16 +0200 Subject: [PATCH 2/4] bump --- Gopkg.lock | 81 +- vendor/github.com/gobuffalo/envy/LICENSE.txt | 8 + vendor/github.com/gobuffalo/envy/envy.go | 276 ++ vendor/github.com/gobuffalo/envy/version.go | 3 + vendor/github.com/joho/godotenv/LICENCE | 23 + vendor/github.com/joho/godotenv/godotenv.go | 346 ++ vendor/github.com/markbates/inflect/LICENCE | 7 + .../github.com/markbates/inflect/helpers.go | 19 + .../github.com/markbates/inflect/inflect.go | 892 +++++ vendor/github.com/markbates/inflect/name.go | 163 + .../github.com/markbates/inflect/version.go | 3 + .../openshift/library-go/.gitignore | 20 + .../github.com/openshift/library-go/LICENSE | 201 + .../github.com/openshift/library-go/Makefile | 20 + vendor/github.com/openshift/library-go/OWNERS | 8 + .../github.com/openshift/library-go/README.md | 4 + .../library-go/alpha-build-machinery/Makefile | 46 + .../library-go/alpha-build-machinery/OWNERS | 4 + .../alpha-build-machinery/README.md | 37 + .../make/default.example.mk | 40 + .../make/default.example.mk.help.log | 25 + .../alpha-build-machinery/make/default.mk | 20 + .../make/examples/multiple-binaries/Makefile | 3 + .../examples/multiple-binaries/Makefile.test | 9 + .../multiple-binaries/Makefile.test.log | 9 + .../examples/multiple-binaries/cmd/oc/main.go | 5 + .../multiple-binaries/cmd/openshift/main.go | 5 + .../make/golang.example.mk | 14 + .../make/golang.example.mk.help.log | 14 + .../alpha-build-machinery/make/golang.mk | 28 + .../alpha-build-machinery/make/lib/golang.mk | 16 + .../make/operator.example.mk | 42 + .../make/operator.example.mk.help.log | 25 + .../alpha-build-machinery/make/operator.mk | 11 + .../make/targets/golang/build.mk | 21 + .../make/targets/golang/test-unit.mk | 19 + .../make/targets/golang/verify-update.mk | 34 + .../make/targets/help.mk | 6 + .../make/targets/openshift/bindata.mk | 65 + .../make/targets/openshift/codegen.mk | 41 + .../make/targets/openshift/deps.mk | 35 + .../make/targets/openshift/images.mk | 19 + .../targets/openshift/operator/release.mk | 7 + .../scripts/update-deps.sh | 27 + .../cmd/crd-schema-gen/generator/generator.go | 391 ++ .../library-go/cmd/crd-schema-gen/main.go | 15 + .../openshift/library-go/glide.lock | 858 ++++ .../openshift/library-go/glide.yaml | 59 + .../openshift/library-go/pkg/assets/assets.go | 150 + .../library-go/pkg/assets/create/OWNERS | 4 + .../library-go/pkg/assets/create/creater.go | 248 ++ ...kube-apiserver-operator_01_config.crd.yaml | 16 + .../00_openshift-kube-apiserver-ns.yaml | 6 + .../configmap-aggregator-client-ca.yaml | 7 + .../create/testdata/operator-config.yaml | 6 + .../testdata/secret-aggregator-client.yaml | 7 + .../library-go/pkg/assets/template.go | 78 + .../openshift/library-go/pkg/certs/util.go | 70 + .../pkg/config/client/client_config.go | 131 + .../clusteroperator/v1helpers/status.go | 140 + .../config/configdefaults/config_default.go | 81 + .../library-go/pkg/config/helpers/client.go | 71 + .../pkg/config/helpers/config_refs.go | 145 + .../library-go/pkg/config/helpers/general.go | 64 + .../config/leaderelection/leaderelection.go | 102 + .../library-go/pkg/config/serving/options.go | 51 + .../library-go/pkg/config/serving/server.go | 78 + .../pkg/config/validation/general.go | 
130 + .../pkg/config/validation/serving_info.go | 174 + .../pkg/controller/controllercmd/builder.go | 273 ++ .../pkg/controller/controllercmd/cmd.go | 217 + .../pkg/controller/controllercmd/flags.go | 129 + .../pkg/controller/fileobserver/OWNERS | 6 + .../pkg/controller/fileobserver/observer.go | 59 + .../fileobserver/observer_polling.go | 140 + .../pkg/controller/metrics/client_metrics.go | 53 + .../controller/metrics/workqueue_metrics.go | 90 + .../library-go/pkg/controller/ownerref.go | 60 + .../openshift/library-go/pkg/crypto/crypto.go | 1031 +++++ .../library-go/pkg/crypto/rotation.go | 20 + .../pkg/crypto/testfiles/tls-expired.crt | 14 + .../pkg/crypto/testfiles/tls-multiple.crt | 39 + .../library-go/pkg/crypto/testfiles/tls.crt | 13 + .../library-go/pkg/crypto/testfiles/tls.key | 5 + .../openshift/library-go/pkg/git/OWNERS | 12 + .../openshift/library-go/pkg/git/doc.go | 2 + .../openshift/library-go/pkg/git/git.go | 51 + .../library-go/pkg/git/repository.go | 552 +++ .../pkg/image/internal/digest/digest.go | 138 + .../pkg/image/internal/digest/digester.go | 155 + .../pkg/image/internal/digest/doc.go | 5 + .../pkg/image/internal/reference/doc.go | 5 + .../pkg/image/internal/reference/reference.go | 370 ++ .../pkg/image/internal/reference/regexp.go | 124 + .../pkg/image/reference/reference.go | 245 ++ .../network/networkapihelpers/annotations.go | 63 + .../pkg/operator/certrotation/cabundle.go | 120 + .../client_cert_rotation_controller.go | 196 + .../pkg/operator/certrotation/config.go | 40 + .../pkg/operator/certrotation/label.go | 61 + .../pkg/operator/certrotation/signer.go | 136 + .../pkg/operator/certrotation/target.go | 267 ++ .../certrotation/testfiles/tls-expired.crt | 14 + .../certrotation/testfiles/tls-multiple.crt | 39 + .../operator/certrotation/testfiles/tls.crt | 13 + .../operator/certrotation/testfiles/tls.key | 5 + .../cloudprovider/observe_cloudprovider.go | 157 + .../config_observer_controller.go | 192 + .../featuregates/observe_featuregates.go | 97 + .../operator/configobserver/network/OWNERS | 10 + .../configobserver/network/observe_network.go | 59 + .../library-go/pkg/operator/events/OWNERS | 8 + .../events/eventstesting/recorder_testing.go | 46 + .../pkg/operator/events/recorder.go | 208 + .../pkg/operator/events/recorder_in_memory.go | 77 + .../pkg/operator/events/recorder_logging.go | 49 + .../pkg/operator/events/recorder_upstream.go | 70 + .../operator/loglevel/logging_controller.go | 118 + .../library-go/pkg/operator/loglevel/util.go | 89 + .../operator/management/management_state.go | 69 + .../management/management_state_controller.go | 142 + .../pkg/operator/render/options/config.go | 42 + .../pkg/operator/render/options/generic.go | 151 + .../pkg/operator/render/options/manifest.go | 95 + .../library-go/pkg/operator/render/render.go | 31 + .../resource/resourceapply/apiextensions.go | 42 + .../resource/resourceapply/apiregistration.go | 45 + .../operator/resource/resourceapply/apps.go | 114 + .../operator/resource/resourceapply/core.go | 280 ++ .../resource/resourceapply/event_helpers.go | 86 + .../resource/resourceapply/generic.go | 87 + .../resourceapply/json_patch_helpers.go | 33 + .../resource/resourceapply/monitoring.go | 101 + .../operator/resource/resourceapply/rbac.go | 190 + .../resource/resourceapply/storage.go | 50 + .../resource/resourcegraph/coordinates.go | 16 + .../resource/resourcegraph/interface.go | 62 + .../resource/resourcegraph/resource.go | 73 + .../resource/resourcegraph/resources.go | 126 + .../resource/resourcehash/as_configmap.go 
| 171 + .../resource/resourcemerge/apiextensions.go | 18 + .../operator/resource/resourcemerge/apps.go | 80 + .../resourcemerge/generic_config_merger.go | 134 + .../resource/resourcemerge/object_merger.go | 153 + .../resource/resourceread/apiextensions.go | 26 + .../operator/resource/resourceread/apps.go | 34 + .../operator/resource/resourceread/core.go | 70 + .../operator/resource/resourceread/rbac.go | 50 + .../operator/resource/resourceread/storage.go | 26 + .../pkg/operator/resource/retry/retry.go | 59 + .../pkg/operator/resource/retry/wait.go | 36 + .../operator/resourcesynccontroller/core.go | 67 + .../resourcesynccontroller/interfaces.go | 19 + .../resourcesync_controller.go | 329 ++ .../staticpod/certsyncpod/certsync_cmd.go | 119 + .../certsyncpod/certsync_controller.go | 203 + .../backing_resource_controller.go | 181 + .../backingresource/bindata/bindata.go | 258 ++ .../installer-cluster-rolebinding.yaml | 12 + .../manifests/installer-sa.yaml | 5 + .../controller/installer/bindata/bindata.go | 263 ++ .../installer/installer_controller.go | 843 ++++ .../staticpod/controller/installer/int32.go | 187 + .../installer/manifests/installer-pod.yaml | 43 + .../controller/monitoring/bindata/bindata.go | 314 ++ .../manifests/prometheus-role-binding.yaml | 13 + .../monitoring/manifests/prometheus-role.yaml | 17 + .../monitoring/manifests/service-monitor.yaml | 26 + .../monitoring_resource_controller.go | 195 + .../controller/node/node_controller.go | 167 + .../controller/prune/bindata/bindata.go | 254 ++ .../prune/manifests/pruner-pod.yaml | 33 + .../controller/prune/prune_controller.go | 363 ++ .../revision/revision_controller.go | 381 ++ .../staticpodstate_controller.go | 217 + .../pkg/operator/staticpod/controllers.go | 284 ++ .../operator/staticpod/installerpod/cmd.go | 359 ++ .../operator/staticpod/installerpod/copy.go | 67 + .../pkg/operator/staticpod/prune/cmd.go | 116 + .../pkg/operator/status/condition.go | 134 + .../pkg/operator/status/status_controller.go | 250 ++ .../library-go/pkg/operator/status/version.go | 85 + .../unsupportedconfigoverrides_controller.go | 194 + .../pkg/operator/v1helpers/core_getters.go | 102 + .../pkg/operator/v1helpers/fake_informers.go | 7 + .../pkg/operator/v1helpers/helpers.go | 259 ++ .../pkg/operator/v1helpers/informers.go | 105 + .../pkg/operator/v1helpers/interfaces.go | 30 + .../pkg/operator/v1helpers/test_helpers.go | 215 + .../pkg/operator/versioning/compare.go | 67 + .../openshift/library-go/pkg/proc/reaper.go | 37 + .../library-go/pkg/proc/reaper_unsupported.go | 8 + .../library-go/pkg/serviceability/logrus.go | 36 + .../library-go/pkg/serviceability/panic.go | 93 + .../library-go/pkg/serviceability/profiler.go | 34 + .../library-go/pkg/serviceability/sentry.go | 62 + .../pkg/serviceability/serviceability.go | 62 + .../github.com/rogpeppe/go-internal/LICENSE | 27 + .../rogpeppe/go-internal/modfile/gopkgin.go | 47 + .../rogpeppe/go-internal/modfile/print.go | 164 + .../rogpeppe/go-internal/modfile/read.go | 869 ++++ .../rogpeppe/go-internal/modfile/rule.go | 724 ++++ .../rogpeppe/go-internal/module/module.go | 540 +++ .../rogpeppe/go-internal/semver/semver.go | 388 ++ vendor/github.com/spf13/afero/LICENSE.txt | 174 + vendor/github.com/spf13/afero/afero.go | 108 + vendor/github.com/spf13/afero/basepath.go | 180 + .../github.com/spf13/afero/cacheOnReadFs.go | 290 ++ vendor/github.com/spf13/afero/const_bsds.go | 22 + .../github.com/spf13/afero/const_win_unix.go | 25 + .../github.com/spf13/afero/copyOnWriteFs.go | 293 ++ 
vendor/github.com/spf13/afero/httpFs.go | 110 + vendor/github.com/spf13/afero/ioutil.go | 230 ++ vendor/github.com/spf13/afero/lstater.go | 27 + vendor/github.com/spf13/afero/match.go | 110 + vendor/github.com/spf13/afero/mem/dir.go | 37 + vendor/github.com/spf13/afero/mem/dirmap.go | 43 + vendor/github.com/spf13/afero/mem/file.go | 317 ++ vendor/github.com/spf13/afero/memmap.go | 365 ++ vendor/github.com/spf13/afero/os.go | 101 + vendor/github.com/spf13/afero/path.go | 106 + vendor/github.com/spf13/afero/readonlyfs.go | 80 + vendor/github.com/spf13/afero/regexpfs.go | 214 + vendor/github.com/spf13/afero/unionFile.go | 320 ++ vendor/github.com/spf13/afero/util.go | 330 ++ .../pkg/apis/apiextensions/doc.go | 2 +- .../pkg/apis/apiextensions/types.go | 120 +- .../apis/apiextensions/v1beta1/defaults.go | 19 +- .../pkg/apis/apiextensions/v1beta1/doc.go | 4 +- .../apiextensions/v1beta1/generated.pb.go | 3533 ++++++++++++----- .../apis/apiextensions/v1beta1/register.go | 1 + .../pkg/apis/apiextensions/v1beta1/types.go | 165 +- .../v1beta1/zz_generated.conversion.go | 148 +- .../v1beta1/zz_generated.deepcopy.go | 179 +- .../apiextensions/zz_generated.deepcopy.go | 97 +- .../v1beta1/customresourcedefinition.go | 17 + vendor/sigs.k8s.io/controller-tools/LICENSE | 201 + .../pkg/crd/generator/generator.go | 217 + .../controller-tools/pkg/crd/util/util.go | 130 + .../pkg/internal/codegen/parse/apis.go | 287 ++ .../pkg/internal/codegen/parse/context.go | 42 + .../pkg/internal/codegen/parse/crd.go | 656 +++ .../pkg/internal/codegen/parse/index.go | 161 + .../pkg/internal/codegen/parse/parser.go | 151 + .../pkg/internal/codegen/parse/util.go | 551 +++ .../pkg/internal/codegen/types.go | 213 + .../pkg/internal/general/util.go | 102 + .../controller-tools/pkg/util/util.go | 77 + 248 files changed, 33765 insertions(+), 1081 deletions(-) create mode 100644 vendor/github.com/gobuffalo/envy/LICENSE.txt create mode 100644 vendor/github.com/gobuffalo/envy/envy.go create mode 100644 vendor/github.com/gobuffalo/envy/version.go create mode 100644 vendor/github.com/joho/godotenv/LICENCE create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/markbates/inflect/LICENCE create mode 100644 vendor/github.com/markbates/inflect/helpers.go create mode 100644 vendor/github.com/markbates/inflect/inflect.go create mode 100644 vendor/github.com/markbates/inflect/name.go create mode 100644 vendor/github.com/markbates/inflect/version.go create mode 100644 vendor/github.com/openshift/library-go/.gitignore create mode 100644 vendor/github.com/openshift/library-go/LICENSE create mode 100644 vendor/github.com/openshift/library-go/Makefile create mode 100644 vendor/github.com/openshift/library-go/OWNERS create mode 100644 vendor/github.com/openshift/library-go/README.md create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/README.md create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile create mode 100644 
vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk create mode 100755 vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh create mode 100755 vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go create mode 100755 vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go create mode 100644 vendor/github.com/openshift/library-go/glide.lock create mode 100644 vendor/github.com/openshift/library-go/glide.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/assets.go create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/creater.go create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml create mode 100644 
vendor/github.com/openshift/library-go/pkg/assets/template.go create mode 100644 vendor/github.com/openshift/library-go/pkg/certs/util.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/client/client_config.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/helpers/client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/helpers/general.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/serving/options.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/serving/server.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/validation/general.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/ownerref.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/crypto.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/rotation.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key create mode 100644 vendor/github.com/openshift/library-go/pkg/git/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/git/doc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/git/git.go create mode 100644 vendor/github.com/openshift/library-go/pkg/git/repository.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/reference/reference.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/render/render.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go create 
mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/status/condition.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/status/version.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go create mode 100644 vendor/github.com/openshift/library-go/pkg/proc/reaper.go create mode 100644 vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go create mode 100644 vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/serviceability/panic.go create mode 100644 vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go create mode 100644 vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go create mode 100644 vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go create mode 100644 vendor/github.com/rogpeppe/go-internal/LICENSE create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/print.go create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/read.go create mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/rule.go create mode 100644 vendor/github.com/rogpeppe/go-internal/module/module.go create mode 100644 vendor/github.com/rogpeppe/go-internal/semver/semver.go create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 vendor/github.com/spf13/afero/afero.go create mode 100644 vendor/github.com/spf13/afero/basepath.go create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 vendor/github.com/spf13/afero/const_bsds.go create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 vendor/github.com/spf13/afero/httpFs.go create mode 100644 vendor/github.com/spf13/afero/ioutil.go create mode 100644 vendor/github.com/spf13/afero/lstater.go create mode 100644 vendor/github.com/spf13/afero/match.go create mode 100644 vendor/github.com/spf13/afero/mem/dir.go create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 vendor/github.com/spf13/afero/mem/file.go create mode 100644 vendor/github.com/spf13/afero/memmap.go create mode 100644 vendor/github.com/spf13/afero/os.go create mode 100644 vendor/github.com/spf13/afero/path.go create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 vendor/github.com/spf13/afero/regexpfs.go create mode 100644 vendor/github.com/spf13/afero/unionFile.go create mode 100644 vendor/github.com/spf13/afero/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/LICENSE create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/util/util.go diff --git a/Gopkg.lock b/Gopkg.lock index 34835129ef..eb493ce3c4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -41,6 +41,14 @@ revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" +[[projects]] + digest = "1:53151cc4366e3945282d4b783fd41f35222cabbc75601e68d8133648c63498d1" + name = "github.com/gobuffalo/envy" + packages = ["."] + pruneopts = "NUT" + revision = 
"043cb4b8af871b49563291e32c66bb84378a60ac" + version = "v1.7.0" + [[projects]] digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af" name = "github.com/gogo/protobuf" @@ -155,6 +163,14 @@ revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" +[[projects]] + digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a" + name = "github.com/joho/godotenv" + packages = ["."] + pruneopts = "NUT" + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + [[projects]] digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41" name = "github.com/json-iterator/go" @@ -163,6 +179,14 @@ revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82" version = "1.1.4" +[[projects]] + digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde" + name = "github.com/markbates/inflect" + packages = ["."] + pruneopts = "NUT" + revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" + version = "v1.0.4" + [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -222,6 +246,17 @@ pruneopts = "NUT" revision = "0255926f53935175fe90b8e7672c4c06c17d79e6" +[[projects]] + branch = "master" + digest = "1:64cbcc543ec1cb552373bb5786b3fb603396fba90e7adf659901b71cfeead8e6" + name = "github.com/openshift/library-go" + packages = [ + "cmd/crd-schema-gen", + "cmd/crd-schema-gen/generator", + ] + pruneopts = "T" + revision = "f8a04595265370f59e61751050f633861faec718" + [[projects]] branch = "master" digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" @@ -291,6 +326,29 @@ pruneopts = "NUT" revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" +[[projects]] + digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" + name = "github.com/rogpeppe/go-internal" + packages = [ + "modfile", + "module", + "semver", + ] + pruneopts = "NUT" + revision = "438578804ca6f31be148c27683afc419ce47c06e" + version = "v1.3.0" + +[[projects]] + digest = "1:6792bb72ea0e7112157d02e4e175cd421b43d004a853f56316a19beca6e0c074" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "NUT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + [[projects]] digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d" name = "github.com/spf13/cobra" @@ -478,8 +536,8 @@ version = "kubernetes-1.13.4" [[projects]] - branch = "origin-4.0-kubernetes-1.12.4" - digest = "1:feafda832ac18a71eabea98ed41c358d4d208d08740d53a6c786591117568f5a" + branch = "origin-4.1-kubernetes-1.13.4" + digest = "1:09167b1f44c6ac3bcd8c1d9356234b2897c279ee14d258004fa0141c08b441c4" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", @@ -490,7 +548,7 @@ "pkg/client/listers/apiextensions/v1beta1", ] pruneopts = "NUT" - revision = "36be1826c980fa7ca82d91645e06e3157f159322" + revision = "3c74db8dd172051b029f91536c681a1b43694809" source = "https://github.com/openshift/kubernetes-apiextensions-apiserver" [[projects]] @@ -695,6 +753,22 @@ pruneopts = "NUT" revision = "66066c83e385e385ccc3c964b44fd7dcd413d0ed" +[[projects]] + branch = "origin-4.1-kubernetes-1.13.4" + digest = "1:bfa25f6d0ca1839d884d1d154e99769416fa94334b8ec07692f90624ac364595" + name = "sigs.k8s.io/controller-tools" + packages = [ + "pkg/crd/generator", + "pkg/crd/util", + "pkg/internal/codegen", + "pkg/internal/codegen/parse", + "pkg/internal/general", + "pkg/util", + ] + pruneopts = "NUT" + 
revision = "61258e1c31f7a84247fd608c0bda7274a50c2d67" + source = "https://github.com/openshift/kubernetes-sigs-controller-tools" + [[projects]] digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" name = "sigs.k8s.io/yaml" @@ -723,6 +797,7 @@ "github.com/openshift/client-go/config/informers/externalversions/config/v1", "github.com/openshift/client-go/config/listers/config/v1", "github.com/openshift/client-go/security/clientset/versioned/typed/security/v1", + "github.com/openshift/library-go/cmd/crd-schema-gen", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", diff --git a/vendor/github.com/gobuffalo/envy/LICENSE.txt b/vendor/github.com/gobuffalo/envy/LICENSE.txt new file mode 100644 index 0000000000..123ddc0d80 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/LICENSE.txt @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright (c) 2018 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gobuffalo/envy/envy.go b/vendor/github.com/gobuffalo/envy/envy.go new file mode 100644 index 0000000000..dc31ba2c0c --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/envy.go @@ -0,0 +1,276 @@ +/* +package envy makes working with ENV variables in Go trivial. + +* Get ENV variables with default values. +* Set ENV variables safely without affecting the underlying system. +* Temporarily change ENV vars; useful for testing. +* Map all of the key/values in the ENV. +* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) +* More! +*/ +package envy + +import ( + "errors" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + + "github.com/joho/godotenv" + "github.com/rogpeppe/go-internal/modfile" +) + +var gil = &sync.RWMutex{} +var env = map[string]string{} + +// GO111MODULE is ENV for turning mods on/off +const GO111MODULE = "GO111MODULE" + +func init() { + Load() + loadEnv() +} + +// Load the ENV variables to the env map +func loadEnv() { + gil.Lock() + defer gil.Unlock() + + if os.Getenv("GO_ENV") == "" { + // if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care + // about v.Value (verbose test mode); we just want to know if the test environment has defined + // it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false), + // so we could not depend on v.Value anyway. 
+ // + if v := flag.Lookup("test.v"); v != nil { + env["GO_ENV"] = "test" + } + } + + // set the GOPATH if using >= 1.8 and the GOPATH isn't set + if os.Getenv("GOPATH") == "" { + out, err := exec.Command("go", "env", "GOPATH").Output() + if err == nil { + gp := strings.TrimSpace(string(out)) + os.Setenv("GOPATH", gp) + } + } + + for _, e := range os.Environ() { + pair := strings.Split(e, "=") + env[pair[0]] = os.Getenv(pair[0]) + } +} + +// Mods returns true if module support is enabled, false otherwise +// See https://github.com/golang/go/wiki/Modules#how-to-install-and-activate-module-support for details +func Mods() bool { + go111 := Get(GO111MODULE, "") + + if !InGoPath() { + return go111 != "off" + } + + return go111 == "on" +} + +// Reload the ENV variables. Useful if +// an external ENV manager has been used +func Reload() { + env = map[string]string{} + loadEnv() +} + +// Load .env files. Files will be loaded in the same order that are received. +// Redefined vars will override previously existing values. +// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env +// If no arg passed, it will try to load a .env file. +func Load(files ...string) error { + + // If no files received, load the default one + if len(files) == 0 { + err := godotenv.Overload() + if err == nil { + Reload() + } + return err + } + + // We received a list of files + for _, file := range files { + + // Check if it exists or we can access + if _, err := os.Stat(file); err != nil { + // It does not exist or we can not access. + // Return and stop loading + return err + } + + // It exists and we have permission. Load it + if err := godotenv.Overload(file); err != nil { + return err + } + + // Reload the env so all new changes are noticed + Reload() + + } + return nil +} + +// Get a value from the ENV. If it doesn't exist the +// default value will be returned. +func Get(key string, value string) string { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v + } + return value +} + +// Get a value from the ENV. If it doesn't exist +// an error will be returned +func MustGet(key string) (string, error) { + gil.RLock() + defer gil.RUnlock() + if v, ok := env[key]; ok { + return v, nil + } + return "", fmt.Errorf("could not find ENV var with %s", key) +} + +// Set a value into the ENV. This is NOT permanent. It will +// only affect values accessed through envy. +func Set(key string, value string) { + gil.Lock() + defer gil.Unlock() + env[key] = value +} + +// MustSet the value into the underlying ENV, as well as envy. +// This may return an error if there is a problem setting the +// underlying ENV value. +func MustSet(key string, value string) error { + gil.Lock() + defer gil.Unlock() + err := os.Setenv(key, value) + if err != nil { + return err + } + env[key] = value + return nil +} + +// Map all of the keys/values set in envy. +func Map() map[string]string { + gil.RLock() + defer gil.RUnlock() + cp := map[string]string{} + for k, v := range env { + cp[k] = v + } + return cp +} + +// Temp makes a copy of the values and allows operation on +// those values temporarily during the run of the function. +// At the end of the function run the copy is discarded and +// the original values are replaced. This is useful for testing. 
+// Warning: This function is NOT safe to use from a goroutine or +// from code which may access any Get or Set function from a goroutine +func Temp(f func()) { + oenv := env + env = map[string]string{} + for k, v := range oenv { + env[k] = v + } + defer func() { env = oenv }() + f() +} + +func GoPath() string { + return Get("GOPATH", "") +} + +func GoBin() string { + return Get("GO_BIN", "go") +} + +func InGoPath() bool { + pwd, _ := os.Getwd() + for _, p := range GoPaths() { + if strings.HasPrefix(pwd, p) { + return true + } + } + return false +} + +// GoPaths returns all possible GOPATHS that are set. +func GoPaths() []string { + gp := Get("GOPATH", "") + if runtime.GOOS == "windows" { + return strings.Split(gp, ";") // Windows uses a different separator + } + return strings.Split(gp, ":") +} + +func importPath(path string) string { + path = strings.TrimPrefix(path, "/private") + for _, gopath := range GoPaths() { + srcpath := filepath.Join(gopath, "src") + rel, err := filepath.Rel(srcpath, path) + if err == nil { + return filepath.ToSlash(rel) + } + } + + // fallback to trim + rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src")) + rel = strings.TrimPrefix(rel, string(filepath.Separator)) + return filepath.ToSlash(rel) +} + +// CurrentModule will attempt to return the module name from `go.mod` if +// modules are enabled. +// If modules are not enabled it will fallback to using CurrentPackage instead. +func CurrentModule() (string, error) { + if !Mods() { + return CurrentPackage(), nil + } + moddata, err := ioutil.ReadFile("go.mod") + if err != nil { + return "", errors.New("go.mod cannot be read or does not exist while go module is enabled") + } + packagePath := modfile.ModulePath(moddata) + if packagePath == "" { + return "", errors.New("go.mod is malformed") + } + return packagePath, nil +} + +// CurrentPackage attempts to figure out the current package name from the PWD +// Use CurrentModule for a more accurate package name. +func CurrentPackage() string { + if Mods() { + } + pwd, _ := os.Getwd() + return importPath(pwd) +} + +func Environ() []string { + gil.RLock() + defer gil.RUnlock() + var e []string + for k, v := range env { + e = append(e, fmt.Sprintf("%s=%s", k, v)) + } + return e +} diff --git a/vendor/github.com/gobuffalo/envy/version.go b/vendor/github.com/gobuffalo/envy/version.go new file mode 100644 index 0000000000..b1623aef72 --- /dev/null +++ b/vendor/github.com/gobuffalo/envy/version.go @@ -0,0 +1,3 @@ +package envy + +const Version = "v1.7.0" diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 0000000000..e7ddd51be9 --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 0000000000..29b436c77c --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,346 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, error := Marshal(envMap) + if error != nil { + return error + } + file, error := os.Create(filename) + if error != nil { + return error + } + _, err := file.WriteString(content) + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + rs := regexp.MustCompile(`\A'(.*)'\z`) + singleQuotes := rs.FindStringSubmatch(value) + + rd := regexp.MustCompile(`\A"(.*)"\z`) + doubleQuotes := rd.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + escapeRegex := regexp.MustCompile(`\\.`) + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + // unescape characters + e := regexp.MustCompile(`\\([^$])`) + value = e.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap) + } + } + + return value +} + +func expandVariables(v string, m map[string]string) string { + r := 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + + return r.ReplaceAllStringFunc(v, func(s string) string { + submatch := r.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + return m[submatch[4]] + } + return s + }) +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/markbates/inflect/LICENCE b/vendor/github.com/markbates/inflect/LICENCE new file mode 100644 index 0000000000..8a36b944a5 --- /dev/null +++ b/vendor/github.com/markbates/inflect/LICENCE @@ -0,0 +1,7 @@ +Copyright (c) 2011 Chris Farmiloe + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
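As a point of reference for the godotenv API vendored above, a minimal usage sketch (illustrative only; the import path is the vendored package, while the file contents, keys, and values shown are hypothetical and not part of this patch):

package main

import (
	"fmt"
	"log"

	"github.com/joho/godotenv"
)

func main() {
	// Parse values from an in-memory string without touching the process env.
	// The keys and values here are hypothetical.
	envMap, err := godotenv.Unmarshal("export DB_HOST=localhost\nDB_PASS=\"s3cr3t\" # inline comment\n")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(envMap["DB_HOST"], envMap["DB_PASS"]) // localhost s3cr3t

	// Read parses one or more files (defaulting to ".env") into a map without
	// mutating the environment; Overload writes the values into the process
	// environment, replacing variables that are already set.
	if fileMap, err := godotenv.Read(); err == nil {
		fmt.Println(fileMap)
	}
	if err := godotenv.Overload(".env"); err != nil {
		log.Println(err)
	}
}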
diff --git a/vendor/github.com/markbates/inflect/helpers.go b/vendor/github.com/markbates/inflect/helpers.go new file mode 100644 index 0000000000..24050c70a0 --- /dev/null +++ b/vendor/github.com/markbates/inflect/helpers.go @@ -0,0 +1,19 @@ +package inflect + +//Helpers is a map of the helper names with its corresponding inflect function +var Helpers = map[string]interface{}{ + "asciffy": Asciify, + "camelize": Camelize, + "camelize_down_first": CamelizeDownFirst, + "capitalize": Capitalize, + "dasherize": Dasherize, + "humanize": Humanize, + "ordinalize": Ordinalize, + "parameterize": Parameterize, + "pluralize": Pluralize, + "pluralize_with_size": PluralizeWithSize, + "singularize": Singularize, + "tableize": Tableize, + "typeify": Typeify, + "underscore": Underscore, +} diff --git a/vendor/github.com/markbates/inflect/inflect.go b/vendor/github.com/markbates/inflect/inflect.go new file mode 100644 index 0000000000..9b6776c191 --- /dev/null +++ b/vendor/github.com/markbates/inflect/inflect.go @@ -0,0 +1,892 @@ +package inflect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// baseAcronyms comes from https://en.wikipedia.org/wiki/List_of_information_technology_acronymss +const baseAcronyms = `JSON,JWT,ID,UUID,SQL,ACK,ACL,ADSL,AES,ANSI,API,ARP,ATM,BGP,BSS,CAT,CCITT,CHAP,CIDR,CIR,CLI,CPE,CPU,CRC,CRT,CSMA,CMOS,DCE,DEC,DES,DHCP,DNS,DRAM,DSL,DSLAM,DTE,DMI,EHA,EIA,EIGRP,EOF,ESS,FCC,FCS,FDDI,FTP,GBIC,gbps,GEPOF,HDLC,HTTP,HTTPS,IANA,ICMP,IDF,IDS,IEEE,IETF,IMAP,IP,IPS,ISDN,ISP,kbps,LACP,LAN,LAPB,LAPF,LLC,MAC,MAN,Mbps,MC,MDF,MIB,MoCA,MPLS,MTU,NAC,NAT,NBMA,NIC,NRZ,NRZI,NVRAM,OSI,OSPF,OUI,PAP,PAT,PC,PIM,PIM,PCM,PDU,POP3,POP,POTS,PPP,PPTP,PTT,PVST,RADIUS,RAM,RARP,RFC,RIP,RLL,ROM,RSTP,RTP,RCP,SDLC,SFD,SFP,SLARP,SLIP,SMTP,SNA,SNAP,SNMP,SOF,SRAM,SSH,SSID,STP,SYN,TDM,TFTP,TIA,TOFU,UDP,URL,URI,USB,UTP,VC,VLAN,VLSM,VPN,W3C,WAN,WEP,WiFi,WPA,WWW` + +// Rule used by rulesets +type Rule struct { + suffix string + replacement string + exact bool +} + +// Ruleset a Ruleset is the config of pluralization rules +// you can extend the rules with the Add* methods +type Ruleset struct { + uncountables map[string]bool + plurals []*Rule + singulars []*Rule + humans []*Rule + acronyms []*Rule +} + +// NewRuleset creates a blank ruleset. 
Unless you are going to +// build your own rules from scratch you probably +// won't need this and can just use the defaultRuleset +// via the global inflect.* methods +func NewRuleset() *Ruleset { + rs := new(Ruleset) + rs.uncountables = make(map[string]bool) + rs.plurals = make([]*Rule, 0) + rs.singulars = make([]*Rule, 0) + rs.humans = make([]*Rule, 0) + rs.acronyms = make([]*Rule, 0) + return rs +} + +// NewDefaultRuleset creates a new ruleset and load it with the default +// set of common English pluralization rules +func NewDefaultRuleset() *Ruleset { + rs := NewRuleset() + rs.AddPlural("movie", "movies") + rs.AddPlural("s", "s") + rs.AddPlural("testis", "testes") + rs.AddPlural("axis", "axes") + rs.AddPlural("octopus", "octopi") + rs.AddPlural("virus", "viri") + rs.AddPlural("octopi", "octopi") + rs.AddPlural("viri", "viri") + rs.AddPlural("alias", "aliases") + rs.AddPlural("status", "statuses") + rs.AddPlural("Status", "Statuses") + rs.AddPlural("campus", "campuses") + rs.AddPlural("bus", "buses") + rs.AddPlural("buffalo", "buffaloes") + rs.AddPlural("tomato", "tomatoes") + rs.AddPlural("tum", "ta") + rs.AddPlural("ium", "ia") + rs.AddPlural("ta", "ta") + rs.AddPlural("ia", "ia") + rs.AddPlural("sis", "ses") + rs.AddPlural("lf", "lves") + rs.AddPlural("rf", "rves") + rs.AddPlural("afe", "aves") + rs.AddPlural("bfe", "bves") + rs.AddPlural("cfe", "cves") + rs.AddPlural("dfe", "dves") + rs.AddPlural("efe", "eves") + rs.AddPlural("gfe", "gves") + rs.AddPlural("hfe", "hves") + rs.AddPlural("ife", "ives") + rs.AddPlural("jfe", "jves") + rs.AddPlural("kfe", "kves") + rs.AddPlural("lfe", "lves") + rs.AddPlural("mfe", "mves") + rs.AddPlural("nfe", "nves") + rs.AddPlural("ofe", "oves") + rs.AddPlural("pfe", "pves") + rs.AddPlural("qfe", "qves") + rs.AddPlural("rfe", "rves") + rs.AddPlural("sfe", "sves") + rs.AddPlural("tfe", "tves") + rs.AddPlural("ufe", "uves") + rs.AddPlural("vfe", "vves") + rs.AddPlural("wfe", "wves") + rs.AddPlural("xfe", "xves") + rs.AddPlural("yfe", "yves") + rs.AddPlural("zfe", "zves") + rs.AddPlural("hive", "hives") + rs.AddPlural("quy", "quies") + rs.AddPlural("by", "bies") + rs.AddPlural("cy", "cies") + rs.AddPlural("dy", "dies") + rs.AddPlural("fy", "fies") + rs.AddPlural("gy", "gies") + rs.AddPlural("hy", "hies") + rs.AddPlural("jy", "jies") + rs.AddPlural("ky", "kies") + rs.AddPlural("ly", "lies") + rs.AddPlural("my", "mies") + rs.AddPlural("ny", "nies") + rs.AddPlural("py", "pies") + rs.AddPlural("qy", "qies") + rs.AddPlural("ry", "ries") + rs.AddPlural("sy", "sies") + rs.AddPlural("ty", "ties") + rs.AddPlural("vy", "vies") + rs.AddPlural("wy", "wies") + rs.AddPlural("xy", "xies") + rs.AddPlural("zy", "zies") + rs.AddPlural("x", "xes") + rs.AddPlural("ch", "ches") + rs.AddPlural("ss", "sses") + rs.AddPlural("sh", "shes") + rs.AddPlural("matrix", "matrices") + rs.AddPlural("vertix", "vertices") + rs.AddPlural("indix", "indices") + rs.AddPlural("matrex", "matrices") + rs.AddPlural("vertex", "vertices") + rs.AddPlural("index", "indices") + rs.AddPlural("mouse", "mice") + rs.AddPlural("louse", "lice") + rs.AddPlural("mice", "mice") + rs.AddPlural("lice", "lice") + rs.AddPlural("ress", "resses") + rs.AddPluralExact("ox", "oxen", true) + rs.AddPluralExact("oxen", "oxen", true) + rs.AddPluralExact("quiz", "quizzes", true) + rs.AddSingular("s", "") + rs.AddSingular("ss", "ss") + rs.AddSingular("news", "news") + rs.AddSingular("ta", "tum") + rs.AddSingular("ia", "ium") + rs.AddSingular("analyses", "analysis") + rs.AddSingular("bases", "basis") + 
rs.AddSingularExact("basis", "basis", true) + rs.AddSingular("diagnoses", "diagnosis") + rs.AddSingularExact("diagnosis", "diagnosis", true) + rs.AddSingular("parentheses", "parenthesis") + rs.AddSingular("prognoses", "prognosis") + rs.AddSingular("synopses", "synopsis") + rs.AddSingular("theses", "thesis") + rs.AddSingular("analyses", "analysis") + rs.AddSingularExact("analysis", "analysis", true) + rs.AddSingular("ovies", "ovie") + rs.AddSingular("aves", "afe") + rs.AddSingular("bves", "bfe") + rs.AddSingular("cves", "cfe") + rs.AddSingular("dves", "dfe") + rs.AddSingular("eves", "efe") + rs.AddSingular("gves", "gfe") + rs.AddSingular("hves", "hfe") + rs.AddSingular("ives", "ife") + rs.AddSingular("jves", "jfe") + rs.AddSingular("kves", "kfe") + rs.AddSingular("lves", "lfe") + rs.AddSingular("mves", "mfe") + rs.AddSingular("nves", "nfe") + rs.AddSingular("oves", "ofe") + rs.AddSingular("pves", "pfe") + rs.AddSingular("qves", "qfe") + rs.AddSingular("rves", "rfe") + rs.AddSingular("sves", "sfe") + rs.AddSingular("tves", "tfe") + rs.AddSingular("uves", "ufe") + rs.AddSingular("vves", "vfe") + rs.AddSingular("wves", "wfe") + rs.AddSingular("xves", "xfe") + rs.AddSingular("yves", "yfe") + rs.AddSingular("zves", "zfe") + rs.AddSingular("hives", "hive") + rs.AddSingular("tives", "tive") + rs.AddSingular("lves", "lf") + rs.AddSingular("rves", "rf") + rs.AddSingular("quies", "quy") + rs.AddSingular("bies", "by") + rs.AddSingular("cies", "cy") + rs.AddSingular("dies", "dy") + rs.AddSingular("fies", "fy") + rs.AddSingular("gies", "gy") + rs.AddSingular("hies", "hy") + rs.AddSingular("jies", "jy") + rs.AddSingular("kies", "ky") + rs.AddSingular("lies", "ly") + rs.AddSingular("mies", "my") + rs.AddSingular("nies", "ny") + rs.AddSingular("pies", "py") + rs.AddSingular("qies", "qy") + rs.AddSingular("ries", "ry") + rs.AddSingular("sies", "sy") + rs.AddSingular("ties", "ty") + // rs.AddSingular("vies", "vy") + rs.AddSingular("wies", "wy") + rs.AddSingular("xies", "xy") + rs.AddSingular("zies", "zy") + rs.AddSingular("series", "series") + rs.AddSingular("xes", "x") + rs.AddSingular("ches", "ch") + rs.AddSingular("sses", "ss") + rs.AddSingular("shes", "sh") + rs.AddSingular("mice", "mouse") + rs.AddSingular("lice", "louse") + rs.AddSingular("buses", "bus") + rs.AddSingularExact("bus", "bus", true) + rs.AddSingular("oes", "o") + rs.AddSingular("shoes", "shoe") + rs.AddSingular("crises", "crisis") + rs.AddSingularExact("crisis", "crisis", true) + rs.AddSingular("axes", "axis") + rs.AddSingularExact("axis", "axis", true) + rs.AddSingular("testes", "testis") + rs.AddSingularExact("testis", "testis", true) + rs.AddSingular("octopi", "octopus") + rs.AddSingularExact("octopus", "octopus", true) + rs.AddSingular("viri", "virus") + rs.AddSingularExact("virus", "virus", true) + rs.AddSingular("statuses", "status") + rs.AddSingular("Statuses", "Status") + rs.AddSingular("campuses", "campus") + rs.AddSingularExact("status", "status", true) + rs.AddSingularExact("Status", "Status", true) + rs.AddSingularExact("campus", "campus", true) + rs.AddSingular("aliases", "alias") + rs.AddSingularExact("alias", "alias", true) + rs.AddSingularExact("oxen", "ox", true) + rs.AddSingular("vertices", "vertex") + rs.AddSingular("indices", "index") + rs.AddSingular("matrices", "matrix") + rs.AddSingularExact("quizzes", "quiz", true) + rs.AddSingular("databases", "database") + rs.AddSingular("resses", "ress") + rs.AddSingular("ress", "ress") + rs.AddIrregular("person", "people") + rs.AddIrregular("man", "men") + 
rs.AddIrregular("child", "children") + rs.AddIrregular("sex", "sexes") + rs.AddIrregular("move", "moves") + rs.AddIrregular("zombie", "zombies") + rs.AddIrregular("Status", "Statuses") + rs.AddIrregular("status", "statuses") + rs.AddIrregular("campus", "campuses") + rs.AddIrregular("human", "humans") + rs.AddUncountable("equipment") + rs.AddUncountable("information") + rs.AddUncountable("rice") + rs.AddUncountable("money") + rs.AddUncountable("species") + rs.AddUncountable("series") + rs.AddUncountable("fish") + rs.AddUncountable("sheep") + rs.AddUncountable("jeans") + rs.AddUncountable("police") + + acronyms := strings.Split(baseAcronyms, ",") + for _, acr := range acronyms { + rs.AddAcronym(acr) + } + + return rs +} + +// Uncountables returns a map of uncountables in the ruleset +func (rs *Ruleset) Uncountables() map[string]bool { + return rs.uncountables +} + +// AddPlural add a pluralization rule +func (rs *Ruleset) AddPlural(suffix, replacement string) { + rs.AddPluralExact(suffix, replacement, false) +} + +// AddPluralExact add a pluralization rule with full string match +func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) { + // remove uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + // prepend + rs.plurals = append([]*Rule{r}, rs.plurals...) +} + +// AddSingular add a singular rule +func (rs *Ruleset) AddSingular(suffix, replacement string) { + rs.AddSingularExact(suffix, replacement, false) +} + +// AddSingularExact same as AddSingular but you can set `exact` to force +// a full string match +func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) { + // remove from uncountable + delete(rs.uncountables, suffix) + // create rule + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + r.exact = exact + rs.singulars = append([]*Rule{r}, rs.singulars...) +} + +// AddHuman Human rules are applied by humanize to show more friendly +// versions of words +func (rs *Ruleset) AddHuman(suffix, replacement string) { + r := new(Rule) + r.suffix = suffix + r.replacement = replacement + rs.humans = append([]*Rule{r}, rs.humans...) +} + +// AddIrregular Add any inconsistent pluralizing/singularizing rules +// to the set here. +func (rs *Ruleset) AddIrregular(singular, plural string) { + delete(rs.uncountables, singular) + delete(rs.uncountables, plural) + rs.AddPlural(singular, plural) + rs.AddPlural(plural, plural) + rs.AddSingular(plural, singular) +} + +// AddAcronym if you use acronym you may need to add them to the ruleset +// to prevent Underscored words of things like "HTML" coming out +// as "h_t_m_l" +func (rs *Ruleset) AddAcronym(word string) { + r := new(Rule) + r.suffix = word + r.replacement = rs.Titleize(strings.ToLower(word)) + rs.acronyms = append(rs.acronyms, r) +} + +// AddUncountable add a word to this ruleset that has the same singular and plural form +// for example: "rice" +func (rs *Ruleset) AddUncountable(word string) { + rs.uncountables[strings.ToLower(word)] = true +} + +func (rs *Ruleset) isUncountable(word string) bool { + // handle multiple words by using the last one + words := strings.Split(word, " ") + if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists { + return true + } + return false +} + +//isAcronym returns if a word is acronym or not. 
+func (rs *Ruleset) isAcronym(word string) bool { + for _, rule := range rs.acronyms { + if strings.ToUpper(rule.suffix) == strings.ToUpper(word) { + return true + } + } + + return false +} + +//PluralizeWithSize pluralize with taking number into account +func (rs *Ruleset) PluralizeWithSize(word string, size int) string { + if size == 1 { + return rs.Singularize(word) + } + return rs.Pluralize(word) +} + +// Pluralize returns the plural form of a singular word +func (rs *Ruleset) Pluralize(word string) string { + if len(word) == 0 { + return word + } + lWord := strings.ToLower(word) + if rs.isUncountable(lWord) { + return word + } + + var candidate string + for _, rule := range rs.plurals { + if rule.exact { + if lWord == rule.suffix { + // Capitalized word + if lWord[0] != word[0] && lWord[1:] == word[1:] { + return rs.Capitalize(rule.replacement) + } + return rule.replacement + } + continue + } + + if strings.EqualFold(word, rule.suffix) { + candidate = rule.replacement + } + + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + + if candidate != "" { + return candidate + } + return word + "s" +} + +//Singularize returns the singular form of a plural word +func (rs *Ruleset) Singularize(word string) string { + if len(word) <= 1 { + return word + } + lWord := strings.ToLower(word) + if rs.isUncountable(lWord) { + return word + } + + var candidate string + + for _, rule := range rs.singulars { + if rule.exact { + if lWord == rule.suffix { + // Capitalized word + if lWord[0] != word[0] && lWord[1:] == word[1:] { + return rs.Capitalize(rule.replacement) + } + return rule.replacement + } + continue + } + + if strings.EqualFold(word, rule.suffix) { + candidate = rule.replacement + } + + if strings.HasSuffix(word, rule.suffix) { + return replaceLast(word, rule.suffix, rule.replacement) + } + } + + if candidate != "" { + return candidate + } + + return word +} + +//Capitalize uppercase first character +func (rs *Ruleset) Capitalize(word string) string { + if rs.isAcronym(word) { + return strings.ToUpper(word) + } + return strings.ToUpper(word[:1]) + word[1:] +} + +//Camelize "dino_party" -> "DinoParty" +func (rs *Ruleset) Camelize(word string) string { + if rs.isAcronym(word) { + return strings.ToUpper(word) + } + words := splitAtCaseChangeWithTitlecase(word) + return strings.Join(words, "") +} + +//CamelizeDownFirst same as Camelcase but with first letter downcased +func (rs *Ruleset) CamelizeDownFirst(word string) string { + word = Camelize(word) + return strings.ToLower(word[:1]) + word[1:] +} + +//Titleize Capitalize every word in sentence "hello there" -> "Hello There" +func (rs *Ruleset) Titleize(word string) string { + words := splitAtCaseChangeWithTitlecase(word) + result := strings.Join(words, " ") + + var acronymWords []string + for index, word := range words { + if len(word) == 1 { + acronymWords = append(acronymWords, word) + } + + if len(word) > 1 || index == len(words)-1 || len(acronymWords) > 1 { + acronym := strings.Join(acronymWords, "") + if !rs.isAcronym(acronym) { + acronymWords = acronymWords[:len(acronymWords)] + continue + } + + result = strings.Replace(result, strings.Join(acronymWords, " "), acronym, 1) + acronymWords = []string{} + } + } + + return result +} + +func (rs *Ruleset) safeCaseAcronyms(word string) string { + // convert an acronym like HTML into Html + for _, rule := range rs.acronyms { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + return word +} + +func (rs *Ruleset) 
separatedWords(word, sep string) string { + word = rs.safeCaseAcronyms(word) + words := splitAtCaseChange(word) + return strings.Join(words, sep) +} + +//Underscore lowercase underscore version "BigBen" -> "big_ben" +func (rs *Ruleset) Underscore(word string) string { + return rs.separatedWords(word, "_") +} + +//Humanize First letter of sentence capitalized +// Uses custom friendly replacements via AddHuman() +func (rs *Ruleset) Humanize(word string) string { + word = replaceLast(word, "_id", "") // strip foreign key kinds + // replace and strings in humans list + for _, rule := range rs.humans { + word = strings.Replace(word, rule.suffix, rule.replacement, -1) + } + sentence := rs.separatedWords(word, " ") + + r, n := utf8.DecodeRuneInString(sentence) + return string(unicode.ToUpper(r)) + sentence[n:] +} + +//ForeignKey an underscored foreign key name "Person" -> "person_id" +func (rs *Ruleset) ForeignKey(word string) string { + return rs.Underscore(rs.Singularize(word)) + "_id" +} + +//ForeignKeyCondensed a foreign key (with an underscore) "Person" -> "personid" +func (rs *Ruleset) ForeignKeyCondensed(word string) string { + return rs.Underscore(word) + "id" +} + +//Tableize Rails style pluralized table names: "SuperPerson" -> "super_people" +func (rs *Ruleset) Tableize(word string) string { + return rs.Pluralize(rs.Underscore(rs.Typeify(word))) +} + +var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`) + +//Parameterize param safe dasherized names like "my-param" +func (rs *Ruleset) Parameterize(word string) string { + return ParameterizeJoin(word, "-") +} + +//ParameterizeJoin param safe dasherized names with custom separator +func (rs *Ruleset) ParameterizeJoin(word, sep string) string { + word = strings.ToLower(word) + word = rs.Asciify(word) + word = notUrlSafe.ReplaceAllString(word, "") + word = strings.Replace(word, " ", sep, -1) + if len(sep) > 0 { + squash, err := regexp.Compile(sep + "+") + if err == nil { + word = squash.ReplaceAllString(word, sep) + } + } + word = strings.Trim(word, sep+" ") + return word +} + +var lookalikes = map[string]*regexp.Regexp{ + "A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`), + "AE": regexp.MustCompile(`Æ`), + "C": regexp.MustCompile(`Ç`), + "E": regexp.MustCompile(`È|É|Ê|Ë`), + "G": regexp.MustCompile(`Ğ`), + "I": regexp.MustCompile(`Ì|Í|Î|Ï|İ`), + "N": regexp.MustCompile(`Ñ`), + "O": regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`), + "S": regexp.MustCompile(`Ş`), + "U": regexp.MustCompile(`Ù|Ú|Û|Ü`), + "Y": regexp.MustCompile(`Ý`), + "ss": regexp.MustCompile(`ß`), + "a": regexp.MustCompile(`à|á|â|ã|ä|å`), + "ae": regexp.MustCompile(`æ`), + "c": regexp.MustCompile(`ç`), + "e": regexp.MustCompile(`è|é|ê|ë`), + "g": regexp.MustCompile(`ğ`), + "i": regexp.MustCompile(`ì|í|î|ï|ı`), + "n": regexp.MustCompile(`ñ`), + "o": regexp.MustCompile(`ò|ó|ô|õ|ö|ø`), + "s": regexp.MustCompile(`ş`), + "u": regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`), + "y": regexp.MustCompile(`ý|ÿ`), +} + +//Asciify transforms Latin characters like é -> e +func (rs *Ruleset) Asciify(word string) string { + for repl, regex := range lookalikes { + word = regex.ReplaceAllString(word, repl) + } + return word +} + +var tablePrefix = regexp.MustCompile(`^[^.]*\.`) + +//Typeify "something_like_this" -> "SomethingLikeThis" +func (rs *Ruleset) Typeify(word string) string { + word = tablePrefix.ReplaceAllString(word, "") + return rs.Camelize(rs.Singularize(word)) +} + +//Dasherize "SomeText" -> "some-text" +func (rs *Ruleset) Dasherize(word string) string { + return rs.separatedWords(word, "-") +} + 
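// A minimal usage sketch of the Ruleset API defined above (illustrative only,
// not part of the vendored file; results follow the default English rules
// installed by NewDefaultRuleset and the doc comments on each method):
//
//	rs := NewDefaultRuleset()
//	rs.Pluralize("person")     // "people"    (irregular rule)
//	rs.Singularize("statuses") // "status"
//	rs.Camelize("dino_party")  // "DinoParty"
//	rs.Underscore("BigBen")    // "big_ben"
//	rs.Dasherize("SomeText")   // "some-text"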
+//Ordinalize "1031" -> "1031st" +func (rs *Ruleset) Ordinalize(str string) string { + number, err := strconv.Atoi(str) + if err != nil { + return str + } + switch abs(number) % 100 { + case 11, 12, 13: + return fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + return fmt.Sprintf("%dst", number) + case 2: + return fmt.Sprintf("%dnd", number) + case 3: + return fmt.Sprintf("%drd", number) + } + } + return fmt.Sprintf("%dth", number) +} + +//ForeignKeyToAttribute returns the attribute name from the foreign key +func (rs *Ruleset) ForeignKeyToAttribute(str string) string { + w := rs.Camelize(str) + if strings.HasSuffix(w, "Id") { + return strings.TrimSuffix(w, "Id") + "ID" + } + return w +} + +//LoadReader loads rules from io.Reader param +func (rs *Ruleset) LoadReader(r io.Reader) error { + m := map[string]string{} + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return fmt.Errorf("could not decode inflection JSON from reader: %s", err) + } + for s, p := range m { + defaultRuleset.AddIrregular(s, p) + } + return nil +} + +///////////////////////////////////////// +// the default global ruleset +////////////////////////////////////////// + +var defaultRuleset *Ruleset + +//LoadReader loads rules from io.Reader param +func LoadReader(r io.Reader) error { + return defaultRuleset.LoadReader(r) +} + +func init() { + defaultRuleset = NewDefaultRuleset() + + pwd, _ := os.Getwd() + cfg := filepath.Join(pwd, "inflections.json") + if p := os.Getenv("INFLECT_PATH"); p != "" { + cfg = p + } + if _, err := os.Stat(cfg); err == nil { + b, err := ioutil.ReadFile(cfg) + if err != nil { + fmt.Printf("could not read inflection file %s (%s)\n", cfg, err) + return + } + if err = defaultRuleset.LoadReader(bytes.NewReader(b)); err != nil { + fmt.Println(err) + } + } +} + +//Uncountables returns a list of uncountables rules +func Uncountables() map[string]bool { + return defaultRuleset.Uncountables() +} + +//AddPlural adds plural to the ruleset +func AddPlural(suffix, replacement string) { + defaultRuleset.AddPlural(suffix, replacement) +} + +//AddSingular adds singular to the ruleset +func AddSingular(suffix, replacement string) { + defaultRuleset.AddSingular(suffix, replacement) +} + +//AddHuman adds human +func AddHuman(suffix, replacement string) { + defaultRuleset.AddHuman(suffix, replacement) +} + +func AddIrregular(singular, plural string) { + defaultRuleset.AddIrregular(singular, plural) +} + +func AddAcronym(word string) { + defaultRuleset.AddAcronym(word) +} + +func AddUncountable(word string) { + defaultRuleset.AddUncountable(word) +} + +func Pluralize(word string) string { + return defaultRuleset.Pluralize(word) +} + +func PluralizeWithSize(word string, size int) string { + return defaultRuleset.PluralizeWithSize(word, size) +} + +func Singularize(word string) string { + return defaultRuleset.Singularize(word) +} + +func Capitalize(word string) string { + return defaultRuleset.Capitalize(word) +} + +func Camelize(word string) string { + return defaultRuleset.Camelize(word) +} + +func CamelizeDownFirst(word string) string { + return defaultRuleset.CamelizeDownFirst(word) +} + +func Titleize(word string) string { + return defaultRuleset.Titleize(word) +} + +func Underscore(word string) string { + return defaultRuleset.Underscore(word) +} + +func Humanize(word string) string { + return defaultRuleset.Humanize(word) +} + +func ForeignKey(word string) string { + return defaultRuleset.ForeignKey(word) +} + +func ForeignKeyCondensed(word string) string { + return 
defaultRuleset.ForeignKeyCondensed(word) +} + +func Tableize(word string) string { + return defaultRuleset.Tableize(word) +} + +func Parameterize(word string) string { + return defaultRuleset.Parameterize(word) +} + +func ParameterizeJoin(word, sep string) string { + return defaultRuleset.ParameterizeJoin(word, sep) +} + +func Typeify(word string) string { + return defaultRuleset.Typeify(word) +} + +func Dasherize(word string) string { + return defaultRuleset.Dasherize(word) +} + +func Ordinalize(word string) string { + return defaultRuleset.Ordinalize(word) +} + +func Asciify(word string) string { + return defaultRuleset.Asciify(word) +} + +func ForeignKeyToAttribute(word string) string { + return defaultRuleset.ForeignKeyToAttribute(word) +} + +// helper funcs + +func reverse(s string) string { + o := make([]rune, utf8.RuneCountInString(s)) + i := len(o) + for _, c := range s { + i-- + o[i] = c + } + return string(o) +} + +func isSpacerChar(c rune) bool { + switch { + case c == rune("_"[0]): + return true + case c == rune(" "[0]): + return true + case c == rune(":"[0]): + return true + case c == rune("-"[0]): + return true + } + return false +} + +func splitAtCaseChange(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + word = append(word, unicode.ToLower(c)) + } + } + words = append(words, string(word)) + return words +} + +func splitAtCaseChangeWithTitlecase(s string) []string { + words := make([]string, 0) + word := make([]rune, 0) + + for _, c := range s { + spacer := isSpacerChar(c) + if len(word) > 0 { + if unicode.IsUpper(c) || spacer { + words = append(words, string(word)) + word = make([]rune, 0) + } + } + if !spacer { + if len(word) > 0 { + word = append(word, unicode.ToLower(c)) + } else { + word = append(word, unicode.ToUpper(c)) + } + } + } + + words = append(words, string(word)) + return words +} + +func replaceLast(s, match, repl string) string { + // reverse strings + srev := reverse(s) + mrev := reverse(match) + rrev := reverse(repl) + // match first and reverse back + return reverse(strings.Replace(srev, mrev, rrev, 1)) +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/vendor/github.com/markbates/inflect/name.go b/vendor/github.com/markbates/inflect/name.go new file mode 100644 index 0000000000..e6863e28a6 --- /dev/null +++ b/vendor/github.com/markbates/inflect/name.go @@ -0,0 +1,163 @@ +package inflect + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/gobuffalo/envy" +) + +// Name is a string that represents the "name" of a thing, like an app, model, etc... +type Name string + +// Title version of a name. ie. "foo_bar" => "Foo Bar" +func (n Name) Title() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Titleize(s) + } + + return strings.Join(x, " ") +} + +// Underscore version of a name. ie. 
"FooBar" => "foo_bar" +func (n Name) Underscore() string { + w := string(n) + if strings.ToUpper(w) == w { + return strings.ToLower(w) + } + return Underscore(w) +} + +// Plural version of a name +func (n Name) Plural() string { + return Pluralize(string(n)) +} + +// Singular version of a name +func (n Name) Singular() string { + return Singularize(string(n)) +} + +// Camel version of a name +func (n Name) Camel() string { + c := Camelize(string(n)) + if strings.HasSuffix(c, "Id") { + c = strings.TrimSuffix(c, "Id") + c += "ID" + } + return c +} + +// Model version of a name. ie. "user" => "User" +func (n Name) Model() string { + x := strings.Split(string(n), "/") + for i, s := range x { + x[i] = Camelize(Singularize(s)) + } + + return strings.Join(x, "") +} + +// Resource version of a name +func (n Name) Resource() string { + name := n.Underscore() + x := strings.FieldsFunc(name, func(r rune) bool { + return r == '_' || r == '/' + }) + + for i, w := range x { + if i == len(x)-1 { + x[i] = Camelize(Pluralize(strings.ToLower(w))) + continue + } + + x[i] = Camelize(w) + } + + return strings.Join(x, "") +} + +// ModelPlural version of a name. ie. "user" => "Users" +func (n Name) ModelPlural() string { + return Camelize(Pluralize(n.Model())) +} + +// File version of a name +func (n Name) File() string { + return Underscore(Camelize(string(n))) +} + +// Table version of a name +func (n Name) Table() string { + return Underscore(Pluralize(string(n))) +} + +// UnderSingular version of a name +func (n Name) UnderSingular() string { + return Underscore(Singularize(string(n))) +} + +// PluralCamel version of a name +func (n Name) PluralCamel() string { + return Pluralize(Camelize(string(n))) +} + +// PluralUnder version of a name +func (n Name) PluralUnder() string { + return Pluralize(Underscore(string(n))) +} + +// URL version of a name +func (n Name) URL() string { + return n.PluralUnder() +} + +// CamelSingular version of a name +func (n Name) CamelSingular() string { + return Camelize(Singularize(string(n))) +} + +// VarCaseSingular version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCaseSingular() string { + return CamelizeDownFirst(Singularize(Underscore(n.Resource()))) +} + +// VarCasePlural version of a name. ie. "FooBar" => "fooBar" +func (n Name) VarCasePlural() string { + return CamelizeDownFirst(n.Resource()) +} + +// Lower case version of a string +func (n Name) Lower() string { + return strings.ToLower(string(n)) +} + +// ParamID returns foo_bar_id +func (n Name) ParamID() string { + return fmt.Sprintf("%s_id", strings.Replace(n.UnderSingular(), "/", "_", -1)) +} + +// Package returns go package +func (n Name) Package() string { + key := string(n) + + for _, gp := range envy.GoPaths() { + key = strings.TrimPrefix(key, filepath.Join(gp, "src")) + key = strings.TrimPrefix(key, gp) + } + key = strings.TrimPrefix(key, string(filepath.Separator)) + + key = strings.Replace(key, "\\", "/", -1) + return key +} + +// Char returns first character in lower case, this is useful for methods inside a struct. 
+func (n Name) Char() string { + return strings.ToLower(string(n[0])) +} + +func (n Name) String() string { + return string(n) +} diff --git a/vendor/github.com/markbates/inflect/version.go b/vendor/github.com/markbates/inflect/version.go new file mode 100644 index 0000000000..a167449841 --- /dev/null +++ b/vendor/github.com/markbates/inflect/version.go @@ -0,0 +1,3 @@ +package inflect + +const Version = "v1.0.4" diff --git a/vendor/github.com/openshift/library-go/.gitignore b/vendor/github.com/openshift/library-go/.gitignore new file mode 100644 index 0000000000..533a4d2b86 --- /dev/null +++ b/vendor/github.com/openshift/library-go/.gitignore @@ -0,0 +1,20 @@ +/_output +/third-party +/.project +/.vagrant +/.vscode +/.settings +/cpu.pprof +/os-version-defs +/.make/ +*.swp +.vimrc +.DS_Store +.idea +origin.iml +*.pyc +.tag* +.project +*.go~ +.envrc +.hg_archival.txt diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/library-go/Makefile b/vendor/github.com/openshift/library-go/Makefile new file mode 100644 index 0000000000..c7f0ce5dfc --- /dev/null +++ b/vendor/github.com/openshift/library-go/Makefile @@ -0,0 +1,20 @@ +all: build +.PHONY: all + +# All the go packages (e.g. for verfy) +GO_PACKAGES :=./pkg/... ./cmd/... 
+# Packages to be compiled +GO_BUILD_PACKAGES :=$(GO_PACKAGES) +# Do not auto-expand packages for libraries or it would compile them separately +GO_BUILD_PACKAGES_EXPANDED :=$(GO_BUILD_PACKAGES) + +include $(addprefix alpha-build-machinery/make/, \ + golang.mk \ + targets/openshift/deps.mk \ + targets/openshift/bindata.mk \ +) + +$(call add-bindata,backingresources,./pkg/operator/staticpod/controller/backingresource/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/backingresource/bindata/bindata.go) +$(call add-bindata,monitoring,./pkg/operator/staticpod/controller/monitoring/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/monitoring/bindata/bindata.go) +$(call add-bindata,installer,./pkg/operator/staticpod/controller/installer/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/installer/bindata/bindata.go) +$(call add-bindata,staticpod,./pkg/operator/staticpod/controller/prune/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/prune/bindata/bindata.go) diff --git a/vendor/github.com/openshift/library-go/OWNERS b/vendor/github.com/openshift/library-go/OWNERS new file mode 100644 index 0000000000..b372622b7b --- /dev/null +++ b/vendor/github.com/openshift/library-go/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - smarterclayton + - deads2k + - sttts +approvers: + - smarterclayton + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/README.md b/vendor/github.com/openshift/library-go/README.md new file mode 100644 index 0000000000..db05806260 --- /dev/null +++ b/vendor/github.com/openshift/library-go/README.md @@ -0,0 +1,4 @@ +# library-go +Helpers for going from apis and clients to useful runtime constructs. `config.ServingInfo` to useful serving constructs is the canonical example. Anything introduced here must have concrete use-cases in at least two separate openshift repos and be of some reasonable complexity. The bar here is high. We'll start with openshift/api-review as the approvers. + +This repo **must not depend on k8s.io/kubernetes or openshift/origin**. 
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile new file mode 100644 index 0000000000..a758c97a7a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile @@ -0,0 +1,46 @@ +SHELL :=/bin/bash +all: verify +.PHONY: all + +makefiles :=$(wildcard ./make/*.example.mk) +examples :=$(wildcard ./make/examples/*/Makefile.test) + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +# We need to change dir to the final makefile directory or relative paths won't match +define update-makefile-log +mkdir -p "$(3)" +$(MAKE) -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) + +endef + + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +define check-makefile-log +$(call update-makefile-log,$(1),$(2),$(3)) +diff -N "$(1)$(subst ..,.,.$(2).log)" "$(3)/$(notdir $(1))$(subst ..,.,.$(2).log)" + +endef + +update-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(dir $(f)))) +.PHONY: update-makefiles + +verify-makefiles: tmp_dir:=$(shell mktemp -d) +verify-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(tmp_dir)/$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(tmp_dir)/$(dir $(f)))) +.PHONY: verify-makefiles + +verify: verify-makefiles +.PHONY: verify + +update: update-makefiles +.PHONY: update + + +include ./make/targets/help.mk diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS b/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS new file mode 100644 index 0000000000..ff2b6a24c8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - tnozicka +approvers: + - tnozicka diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md b/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md new file mode 100644 index 0000000000..294a5834a8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md @@ -0,0 +1,37 @@ +# library-go/alpha-build-machinery +These are the building blocks for this and many of our other repositories to share code for Makefiles, helper scripts and other build related machinery. + +## Makefiles +`make/` directory contains several predefined makefiles `(*.mk)` to choose from and include one of them as a base in your final `Makefile`. These are the predefined flows providing you with e.g. `build`, `test` or `verify` targets. To start with it is recommended you base Makefile on the corresponding `*.example.mk` using copy&paste. + +As some advanced targets are generated, every Makefile contains `make help` target listing all the available ones. All of the "example" makefiles have a corresponding `.help` file listing all the targets available there. + +Also for advanced use and if none of the predefined flows doesn't fit your needs, you can compose the flow from modules in similar way to how the predefined flows do, + +### Golang +Standard makefile for building pure Golang projects. 
+ - [make/golang.mk](make/golang.mk) + - [make/golang.example.mk](make/golang.example.mk) + - [make/golang.example.mk.help](make/golang.example.mk.help) + +### Default +Standard makefile for OpenShift Golang projects. + +Extends [#Golang](). + + - [make/default.mk](make/default.mk) + - [make/default.example.mk](make/default.example.mk) + - [make/default.example.mk.help](make/default.example.mk.help) + +### Operator +Standard makefile for OpenShift Golang projects. + +Extends [#Default](). + + - [make/operator.mk](make/operator.mk) + - [make/operator.example.mk](make/operator.example.mk) + - [make/operator.example.mk.help](make/operator.example.mk.help) + + +## Scripts +`scripts` contain more complicated logic that is used in some make targets. diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk new file mode 100644 index 0000000000..b8a8112c02 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk @@ -0,0 +1,40 @@ +all: build +.PHONY: all + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... +# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./default.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` + +# Codegen module needs setting these required variables +CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated +CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis +CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 +# You can list all codegen related variables by: +# $ make -n --print-data-base | grep ^CODEGEN + +# This will call a macro called "build-image" which will generate image specific targets based on the parameters: +# $0 - macro name +# $1 - target suffix +# $2 - Dockerfile path +# $3 - context directory for image build +# It will generate target "image-$(1)" for builing the image an binding it as a prerequisite to target "images". +$(call build-image,origin-cluster-openshift-apiserver-operator,./Dockerfile,.) + +# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: +# $0 - macro name +# $1 - target suffix +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +# It will generate targets {update,verify}-bindata-$(1) logically grouping them in unsuffixed versions of these targets +# and also hooked into {update,verify}-generated for broader integration. 
+$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go) + diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log new file mode 100644 index 0000000000..3645ce965c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log @@ -0,0 +1,25 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +image-origin-cluster-openshift-apiserver-operator +images +test +test-unit +update +update-bindata +update-codegen +update-deps +update-deps-overrides +update-generated +update-gofmt +verify +verify-bindata +verify-codegen +verify-deps +verify-generated +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk new file mode 100644 index 0000000000..6e6c034373 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk @@ -0,0 +1,20 @@ +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + +# We extend the default verify/update for Golang + +verify: verify-codegen +verify: verify-bindata +.PHONY: verify + +update: update-codegen +update: update-bindata +.PHONY: update + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + targets/openshift/*.mk \ + golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile new file mode 100644 index 0000000000..17350782a7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile @@ -0,0 +1,3 @@ +include $(addprefix ../../, \ + golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test new file mode 100644 index 0000000000..1922d246a2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test @@ -0,0 +1,9 @@ +all: + $(MAKE) -C . build + [[ -f ./openshift ]] + [[ -f ./oc ]] + + $(MAKE) -C . clean + [[ ! -f ./openshift ]] + [[ ! -f ./oc ]] +.PHONY: all diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log new file mode 100644 index 0000000000..5b7d7f2e8e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log @@ -0,0 +1,9 @@ +make -C . build +go build github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +go build github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +[[ -f ./openshift ]] +[[ -f ./oc ]] +make -C . clean +rm -f oc openshift +[[ ! 
-f ./openshift ]] +[[ ! -f ./oc ]] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go new file mode 100644 index 0000000000..7905807777 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go @@ -0,0 +1,5 @@ +package main + +func main() { + +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go new file mode 100644 index 0000000000..7905807777 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go @@ -0,0 +1,5 @@ +package main + +func main() { + +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk new file mode 100644 index 0000000000..aba2c48903 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk @@ -0,0 +1,14 @@ +all: build +.PHONY: all + + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... +# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./golang.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log new file mode 100644 index 0000000000..a5cc906ddf --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log @@ -0,0 +1,14 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +test +test-unit +update +update-gofmt +verify +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk new file mode 100644 index 0000000000..15a0b49bcb --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk @@ -0,0 +1,28 @@ +all: build +.PHONY: all + +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +verify: verify-gofmt +verify: verify-govet +.PHONY: verify + +update: update-gofmt +.PHONY: update + + +test: test-unit +.PHONY: test + +clean: clean-binaries +.PHONY: clean + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
+include $(addprefix $(self_dir), \
+	targets/help.mk \
+	targets/golang/*.mk \
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk
new file mode 100644
index 0000000000..8a904d5f9e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk
@@ -0,0 +1,16 @@
+GO ?=go
+GOFMT ?=gofmt
+GOFMT_FLAGS ?=-s -l
+GOLINT ?=golint
+
+GO_FILES ?=$(shell find . -name '*.go' -not -path './vendor/*' -print)
+GO_PACKAGES ?=./...
+GO_TEST_PACKAGES ?=$(GO_PACKAGES)
+
+GO_BUILD_PACKAGES ?=./cmd/...
+GO_BUILD_PACKAGES_EXPANDED ?=$(shell $(GO) list $(GO_BUILD_PACKAGES))
+go_build_binaries =$(notdir $(GO_BUILD_PACKAGES_EXPANDED))
+GO_BUILD_FLAGS ?=
+GO_TEST_FLAGS ?=-race
+
+GO_PACKAGE :=$(notdir $(abspath . ))
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk
new file mode 100644
index 0000000000..2f0326a9cb
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk
@@ -0,0 +1,42 @@
+all: build
+.PHONY: all
+
+
+# You can customize go tools depending on the directory layout.
+# example:
+GO_BUILD_PACKAGES :=./pkg/...
+# You can list all the golang related variables by:
+# $ make -n --print-data-base | grep ^GO
+
+# Include the library makefile
+include ./operator.mk
+# All the available targets are listed in .help
+# or you can list them live by using `make help`
+
+
+# The codegen module requires setting these variables
+CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated
+CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis
+CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1
+# You can list all codegen related variables by:
+# $ make -n --print-data-base | grep ^CODEGEN
+
+# This will call a macro called "build-image" which will generate image-specific targets based on the parameters:
+# $0 - macro name
+# $1 - target suffix
+# $2 - Dockerfile path
+# $3 - context directory for image build
+# It will generate target "image-$(1)" for building the image and binding it as a prerequisite to target "images".
+$(call build-image,origin-cluster-openshift-apiserver-operator,./Dockerfile,.)
+
+# This will call a macro called "add-bindata" which will generate bindata-specific targets based on the parameters:
+# $0 - macro name
+# $1 - target suffix
+# $2 - input dirs
+# $3 - prefix
+# $4 - pkg
+# $5 - output
+# It will generate targets {update,verify}-bindata-$(1), logically grouping them in unsuffixed versions of these targets
+# and also hooking them into {update,verify}-generated for broader integration.
+$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go) + diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log new file mode 100644 index 0000000000..3645ce965c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log @@ -0,0 +1,25 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +image-origin-cluster-openshift-apiserver-operator +images +test +test-unit +update +update-bindata +update-codegen +update-deps +update-deps-overrides +update-generated +update-gofmt +verify +verify-bindata +verify-codegen +verify-deps +verify-generated +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk new file mode 100644 index 0000000000..d763df4617 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk @@ -0,0 +1,11 @@ +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + default.mk \ + targets/openshift/operator/*.mk \ +) + diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk new file mode 100644 index 0000000000..49d484093c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk @@ -0,0 +1,21 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +define build-package + $(GO) build $(GO_BUILD_FLAGS) $(1) + +endef + +# We need to build each package separately so go build creates appropriate binaries +build: + $(foreach package,$(GO_BUILD_PACKAGES_EXPANDED),$(call build-package,$(package))) +.PHONY: build + +clean-binaries: + $(RM) $(go_build_binaries) + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk new file mode 100644 index 0000000000..f96c8ccd73 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk @@ -0,0 +1,19 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +test-unit: +ifndef JUNITFILE + $(GO) test $(GO_TEST_FLAGS) $(GO_TEST_PACKAGES) +else +ifeq (, $(shell which gotest2junit 2>/dev/null)) + $(error gotest2junit not found! Get it by `go get -u github.com/openshift/release/tools/gotest2junit`.) 
+endif + set -o pipefail; $(GO) test $(GO_TEST_FLAGS) -json $(GO_TEST_PACKAGES) | gotest2junit > $(JUNITFILE) +endif +.PHONY: test-unit + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk new file mode 100644 index 0000000000..2034cd10e1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk @@ -0,0 +1,34 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +go_files_count :=$(words $(GO_FILES)) + +verify-gofmt: + $(info Running `$(GOFMT) $(GOFMT_FLAGS)` on $(go_files_count) file(s).) + @TMP=$$( mktemp ); \ + $(GOFMT) $(GOFMT_FLAGS) $(GO_FILES) | tee $${TMP}; \ + if [ -s $${TMP} ]; then \ + echo "$@ failed - please run \`make update-gofmt\`"; \ + exit 1; \ + fi; +.PHONY: verify-gofmt + +update-gofmt: + $(info Running `$(GOFMT) $(GOFMT_FLAGS) -w` on $(go_files_count) file(s).) + @$(GOFMT) $(GOFMT_FLAGS) -w $(GO_FILES) +.PHONY: update-gofmt + + +verify-govet: + $(GO) vet $(GO_PACKAGES) +.PHONY: verify-govet + +verify-golint: + $(GOLINT) $(GO_PACKAGES) +.PHONY: verify-govet + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk new file mode 100644 index 0000000000..55bfbac094 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk @@ -0,0 +1,6 @@ +help: + $(info The following make targets are available:) + @$(MAKE) -f $(firstword $(MAKEFILE_LIST)) --print-data-base --question no-such-target 2>&1 | grep -v 'no-such-target' | \ + grep -v -e '^no-such-target' -e '^makefile' | \ + awk '/^[^.%][-A-Za-z0-9_]*:/ { print substr($$1, 1, length($$1)-1) }' | sort -u +.PHONY: help diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk new file mode 100644 index 0000000000..07b9a32282 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk @@ -0,0 +1,65 @@ +TMP_GOPATH :=$(shell mktemp -d) + + +.ensure-go-bindata: + ln -s $(abspath ./vendor) "$(TMP_GOPATH)/src" + export GOPATH=$(TMP_GOPATH) && go install "./vendor/github.com/jteeuwen/go-bindata/..." 
+ +# $1 - input dirs +# $2 - prefix +# $3 - pkg +# $4 - output +# $5 - output prefix +define run-bindata + $(TMP_GOPATH)/bin/go-bindata -nocompress -nometadata \ + -prefix "$(2)" \ + -pkg "$(3)" \ + -o "$(5)$(4)" \ + -ignore "OWNERS" \ + $(1) && \ + gofmt -s -w "$(5)$(4)" +endef + +# $1 - name +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +define add-bindata-internal +update-bindata-$(1): .ensure-go-bindata + $(call run-bindata,$(2),$(3),$(4),$(5),) +.PHONY: update-bindata-$(1) + +update-bindata: update-bindata-$(1) +.PHONY: update-bindata + + +verify-bindata-$(1): .ensure-go-bindata +verify-bindata-$(1): TMP_DIR := $$(shell mktemp -d) +verify-bindata-$(1): + $(call run-bindata,$(2),$(3),$(4),$(5),$$(TMP_DIR)/) && \ + diff -Naup {.,$$(TMP_DIR)}/$(5) +.PHONY: verify-bindata-$(1) + +verify-bindata: verify-bindata-$(1) +.PHONY: verify-bindata +endef + + +update-generated: update-bindata +.PHONY: update-bindata + +update: update-generated +.PHONY: update + + +verify-generated: verify-bindata +.PHONY: verify-bindata + +verify: verify-generated +.PHONY: verify + + +define add-bindata +$(eval $(call add-bindata-internal,$(1),$(2),$(3),$(4),$(5))) +endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk new file mode 100644 index 0000000000..247de9417c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk @@ -0,0 +1,41 @@ +CODEGEN_PKG ?=./vendor/k8s.io/code-generator/ +CODEGEN_GENERATORS ?=all +CODEGEN_OUTPUT_BASE ?=../../.. +CODEGEN_GO_HEADER_FILE ?=/dev/null + +CODEGEN_API_PACKAGE ?=$(error CODEGEN_API_PACKAGE is required) +CODEGEN_GROUPS_VERSION ?=$(error CODEGEN_GROUPS_VERSION is required) +CODEGEN_OUTPUT_PACKAGE ?=$(error CODEGEN_OUTPUT_PACKAGE is required) + +define run-codegen +$(CODEGEN_PKG)/generate-groups.sh \ + "$(CODEGEN_GENERATORS)" \ + "$(CODEGEN_OUTPUT_PACKAGE)" \ + "$(CODEGEN_API_PACKAGE)" \ + "$(CODEGEN_GROUPS_VERSION)" \ + --output-base $(CODEGEN_OUTPUT_BASE) \ + --go-header-file $(CODEGEN_GO_HEADER_FILE) \ + $1 +endef + + +verify-codegen: + $(call run-codegen,--verify-only) +.PHONY: verify-codegen + +verify-generated: verify-codegen +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + +update-codegen: + $(call run-codegen) +.PHONY: update-codegen + +update-generated: update-codegen +.PHONY: update-generated + +update: update-generated +.PHONY: update diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk new file mode 100644 index 0000000000..fafa8f9dad --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk @@ -0,0 +1,35 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) +scripts_dir :=$(self_dir)/../../../scripts + +# We need to force localle so different envs sort files the same way for recursive traversals +deps_diff :=LC_COLLATE=C diff --no-dereference -N + +update-deps: + $(scripts_dir)/$@.sh +.PHONY: update-deps + +# $1 - temporary directory to restore vendor dependencies from glide.lock +define restore-deps + ln -s $(abspath ./) "$(1)"/current + cp -R -H ./ "$(1)"/updated + $(RM) -r "$(1)"/updated/vendor + cd "$(1)"/updated && glide install --strip-vendor && find ./vendor -name '.hg_archival.txt' -delete + cd "$(1)" && 
$(deps_diff) -r {current,updated}/vendor/ > updated/glide.diff || true +endef + +verify-deps: tmp_dir:=$(shell mktemp -d) +verify-deps: + $(call restore-deps,$(tmp_dir)) + @echo $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff + @ $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff || ( \ + echo "ERROR: Content of 'vendor/' directory doesn't match 'glide.lock' and the overrides in 'glide.diff'!" && \ + echo "If this is an intentional change (a carry patch) please update the 'glide.diff' using 'make update-deps-overrides'." && \ + exit 1 \ + ) +.PHONY: verify-deps + +update-deps-overrides: tmp_dir:=$(shell mktemp -d) +update-deps-overrides: + $(call restore-deps,$(tmp_dir)) + cp "$(tmp_dir)"/{updated,current}/glide.diff +.PHONY: update-deps-overrides diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk new file mode 100644 index 0000000000..cbd2d046c3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk @@ -0,0 +1,19 @@ +IMAGE_REGISTRY ?= +IMAGE_ORG ?=openshift +IMAGE_TAG ?=latest + +# $1 - image name +# $2 - Dockerfile path +# $3 - context +define build-image-internal +image-$(1): + imagebuilder -f $(2) -t $(addsuffix /,$(IMAGE_REGISTRY))$(addsuffix /,$(IMAGE_ORG))$(1)$(addprefix :,$(IMAGE_TAG)) $(3) +.PHONY: image-$(1) + +images: image-$(1) +.PHONY: images +endef + +define build-image +$(eval $(call build-image-internal,$(1),$(2),$(3))) +endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk new file mode 100644 index 0000000000..07fc5605a8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk @@ -0,0 +1,7 @@ +# If we need unified behaviour specific to operators, this folder is the place. + +# It seems that our previous origin-release jq based replacement is suppose to be done +# with `oc adm release new` so it might drop this target. +#origin-release: +# $(error Not implemented.) +#.PHONY: origin-release diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh new file mode 100755 index 0000000000..46812e939c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e + +readonly GLIDE_MINOR_VERSION="13" +readonly REQUIRED_GLIDE_VERSION="0.$GLIDE_MINOR_VERSION" + +function verify_glide_version() { + if ! command -v glide &> /dev/null; then + echo "[FATAL] Glide was not found in \$PATH. Please install version ${REQUIRED_GLIDE_VERSION} or newer." + exit 1 + fi + + local glide_version + glide_version=($(glide --version)) + if ! echo "${glide_version[2]#v}" | awk -F. -v min=$GLIDE_MINOR_VERSION '{ exit $2 < min }'; then + echo "Detected glide version: ${glide_version[*]}." + echo "Please install Glide version ${REQUIRED_GLIDE_VERSION} or newer." 
+ exit 1 + fi +} + +verify_glide_version + +glide update --strip-vendor + +# glide doesn't handle mercurial properly and leaves internal files (equivalent of .git/) laying around +# Given those files differ by mercurial version it was cloned with, verify-deps would break +find ./vendor -name '.hg_archival.txt' -delete diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go new file mode 100755 index 0000000000..cb37958a23 --- /dev/null +++ b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go @@ -0,0 +1,391 @@ +package generator + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + + "github.com/evanphx/json-patch" + "gopkg.in/yaml.v2" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" + crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme) +) + +func init() { + v1beta1.AddToScheme(scheme) +} + +func Run() error { + apisDir := flag.String("apis-dir", "pkg/apis", "the (relative) path to the package with API definitions") + apis := flag.String("apis", "*", "the apis to generate from the apis-dir, in bash glob syntax") + manifestDir := flag.String("manifests-dir", "manifests", "the directory with existing CRD manifests") + outputDir := flag.String("output-dir", "", "optional directory to output the kubebuilder CRDs. By default a temporary directory is used.") + verifyOnly := flag.Bool("verify-only", false, "do not write files, only compare and return with return code 1 if dirty") + domain := flag.String("domain", "", "the domain appended to group names.") + repo := flag.String("repo", "", "the repository package name (optional).") + + flag.Parse() + + // load existing manifests from manifests/ dir + existing, err := crdsFromDirectory(*manifestDir) + if err != nil { + return err + } + + // create temp dir + pwd, err := os.Getwd() + if err != nil { + return err + } + tmpDir, err := ioutil.TempDir(pwd, "") + if err != nil { + return fmt.Errorf("error creating temp directory: %v\n", err) + } + defer os.RemoveAll(tmpDir) + relTmpDir := tmpDir[len(pwd)+1:] + + // find repo in GOPATH + sep := string([]rune{os.PathSeparator}) + GOPATH := strings.TrimRight(os.Getenv("GOPATH"), sep) + if len(*repo) == 0 && len(GOPATH) > 0 && strings.HasPrefix(pwd, filepath.Join(GOPATH, "src")+sep) { + *repo = pwd[len(filepath.Join(GOPATH, "src")+sep):] + fmt.Printf("Derived repo %q from GOPATH and working directory.\n", *repo) + } + + // validate params + if len(*repo) == 0 { + return fmt.Errorf("repo cannot be empty. 
Run crd-schema-gen in GOPATH or specify repo explicitly.") + } + if len(*domain) == 0 { + return fmt.Errorf("domain cannot be empty.") + } + + // copy APIs to temp dir + fmt.Printf("Copying vendor/github.com/openshift/api/config to temporary pkg/apis...\n") + if err := os.MkdirAll(filepath.Join(tmpDir, "pkg/apis"), 0755); err != nil { + return err + } + cmd := fmt.Sprintf("cp -av \"%s/\"%s \"%s\"", *apisDir, *apis, filepath.Join(tmpDir, "pkg/apis")) + out, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() + if err != nil { + fmt.Print(string(out)) + return err + } + if err := ioutil.WriteFile(filepath.Join(tmpDir, "PROJECT"), []byte(fmt.Sprintf(` +domain: %s +repo: %s/%s +`, *domain, *repo, relTmpDir)), 0644); err != nil { + return err + } + + // generate kubebuilder KindGroupYaml manifests into temp dir + g := crdgenerator.Generator{ + RootPath: tmpDir, + OutputDir: filepath.Join(tmpDir, "manifests"), + SkipMapValidation: true, + } + + if len(*outputDir) != 0 { + g.OutputDir = *outputDir + fmt.Printf("Creating kubebuilder manifests %q ...\n", *outputDir) + } else { + fmt.Printf("Creating kubebuilder manifests ...\n") + } + + if err := g.ValidateAndInitFields(); err != nil { + return err + } + if err := g.Do(); err != nil { + return err + } + + // the generator changes the directory for some reason + os.Chdir(pwd) + + // load kubebuilder manifests from temp dir + fromKubebuilder, err := crdsFromDirectory(g.OutputDir) + if err != nil { + return err + } + + existingFileNames := map[string]string{} + for fn, crd := range existing { + existingFileNames[crd.KindGroup] = fn + } + + // update existing manifests with validations of kubebuilder output + dirty := false + noneFound := true + for fn, withValidation := range fromKubebuilder { + existingFileName, ok := existingFileNames[withValidation.KindGroup] + if !ok { + continue + } + noneFound = false + + crd := existing[existingFileName] + + // TODO: support multiple versions + validation, _, err := nested(withValidation.Yaml, "spec", "validation") + if err != nil { + return fmt.Errorf("failed to access spec.validation in %s: %v", fn, err) + } + + // yaml merge patch exists? 
+ patchFileName := existingFileName + "-merge-patch" + if _, err := os.Stat(patchFileName); err == nil { + fmt.Printf("Applying patch %q ...\n", patchFileName) + + yamlPatch, err := ioutil.ReadFile(patchFileName) + if err != nil { + return fmt.Errorf("failed to read yaml-merge-patch %q: %v", patchFileName, err) + } + var patch yaml.MapSlice + if err := yaml.Unmarshal(yamlPatch, &patch); err != nil { + return fmt.Errorf("failed to unmarshal yaml merge patch %q: %v", patchFileName, err) + } + if !onlyHasNoneOr(patch, "spec", "validation") { + return fmt.Errorf("patch in %q can only have spec.validation", patchFileName) + } + validationPatch, _, err := nested(patch, "spec", "validation") + if err != nil { + return fmt.Errorf("failed to get spec.validation from %q: %v", patchFileName, err) + } + if yamlPatch, err = yaml.Marshal(validationPatch); err != nil { + return fmt.Errorf("failed to marshal spec.validation of %q: %v", patchFileName, err) + } + jsonPatch, err := utilyaml.ToJSON(yamlPatch) + if err != nil { + return fmt.Errorf("failed to convert yaml of %q to json: %v", patchFileName, err) + } + yamlValidation, err := yaml.Marshal(validation) + if err != nil { + return fmt.Errorf("failed to marshal generated validation schema of %q: %v", existingFileName, err) + } + jsonValidation, err := utilyaml.ToJSON(yamlValidation) + if err != nil { + return fmt.Errorf("failed to convert yaml validation of %q to json: %v", existingFileName, err) + } + if jsonValidation, err = jsonpatch.MergePatch(jsonValidation, jsonPatch); err != nil { + return fmt.Errorf("failed to patch %q with %q: %v", existingFileName, patchFileName, err) + } + if err := yaml.Unmarshal(jsonValidation, &validation); err != nil { + return fmt.Errorf("failed to unmarshal patched validation schema of %q: %v", existingFileName, err) + } + } + + if validation == nil { + continue + } + + updated, err := set(crd.Yaml, validation, "spec", "validation") + if err != nil { + return fmt.Errorf("failed to set spec.validation in %s: %v", existingFileName, err) + } + if reflect.DeepEqual(updated, crd.Yaml) { + fmt.Printf("Validation of %s in %s did not change.\n", crd.KindGroup, existingFileName) + continue + } + + bs, err := yaml.Marshal(updated) + if err != nil { + return err + } + + // write updated file, either to old location, or to temp dir in verify mode + newFn := existingFileName + if *verifyOnly { + newFn = filepath.Join(tmpDir, filepath.Base(existingFileName)) + } else { + fmt.Printf("Updating validation of %s in %s.\n", crd.KindGroup, existingFileName) + } + if err := ioutil.WriteFile(newFn, bs, 0644); err != nil { + return err + } + + // compare old and new file + if *verifyOnly { + out, err := exec.Command("diff", "-u", existingFileName, newFn).CombinedOutput() + if err != nil { + fmt.Println(string(out)) + dirty = true + } + } + } + + if noneFound { + fmt.Printf("None of the found API types has a corresponding CRD manifest. 
These API types where found:\n\n") + for _, withValidation := range fromKubebuilder { + fmt.Printf(" %s\n", withValidation.KindGroup) + } + fmt.Printf("These CRDs were found:\n\n") + for existingKindGroup := range existingFileNames { + fmt.Printf(" %s\n", existingKindGroup) + } + return fmt.Errorf("no API type for found CRD manifests") + } + + if *verifyOnly && dirty { + return fmt.Errorf("verification failed") + } + + return nil +} + +func nested(x interface{}, pth ...string) (interface{}, bool, error) { + if len(pth) == 0 { + return x, true, nil + } + m, ok := x.(yaml.MapSlice) + if !ok { + return nil, false, fmt.Errorf("%s is not an object, but %T", strings.Join(pth, "."), x) + } + for _, item := range m { + s, ok := item.Key.(string) + if !ok { + continue + } + if s == pth[0] { + ret, found, err := nested(item.Value, pth[1:]...) + if err != nil { + return ret, found, fmt.Errorf("%s.%s", pth[0], err) + } + return ret, found, nil + } + } + return nil, false, nil +} + +func set(x interface{}, v interface{}, pth ...string) (interface{}, error) { + if len(pth) == 0 { + return v, nil + } + + if x == nil { + result, err := set(nil, v, pth[1:]...) + if err != nil { + return nil, fmt.Errorf("%s.%s", pth[0], err) + } + return yaml.MapSlice{yaml.MapItem{Key: pth[0], Value: result}}, nil + } + + m, ok := x.(yaml.MapSlice) + if !ok { + return nil, fmt.Errorf("%s is not an object", strings.Join(pth, ".")) + } + + foundAt := -1 + for i, item := range m { + s, ok := item.Key.(string) + if !ok { + continue + } + if s == pth[0] { + foundAt = i + break + } + } + + if foundAt < 0 { + ret := make(yaml.MapSlice, len(m), len(m)+1) + copy(ret, m) + result, err := set(nil, v, pth[1:]...) + if err != nil { + return nil, fmt.Errorf("%s.%s", pth[0], err) + } + return append(ret, yaml.MapItem{Key: pth[0], Value: result}), nil + } + + result, err := set(m[foundAt].Value, v, pth[1:]...) + ret := make(yaml.MapSlice, len(m)) + copy(ret, m) + if err != nil { + return nil, fmt.Errorf("%s.%s", pth[0], err) + } + ret[foundAt].Value = result + return ret, nil +} + +// onlyHasNoneOr checks for existance of the given path, but nothing next to it is allowed +func onlyHasNoneOr(x interface{}, pth ...string) bool { + if len(pth) == 0 { + return true + } + m, ok := x.(yaml.MapSlice) + if !ok { + return false + } + switch len(m) { + case 0: + return true + case 1: + s, ok := m[0].Key.(string) + if !ok || s != pth[0] { + return false + } + return onlyHasNoneOr(m[0].Value, pth[1:]...) + default: + return false + } +} + +type KindGroupYaml struct { + KindGroup string + Yaml interface{} +} + +// crdsFromDirectory returns CRDs by file path +func crdsFromDirectory(dir string) (map[string]KindGroupYaml, error) { + ret := map[string]KindGroupYaml{} + infos, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + for _, info := range infos { + if info.IsDir() { + continue + } + if !strings.HasSuffix(info.Name(), ".yaml") { + continue + } + bs, err := ioutil.ReadFile(filepath.Join(dir, info.Name())) + if err != nil { + return nil, err + } + + obj, _, err := codecs.UniversalDeserializer().Decode(bs, nil, nil) + if err != nil { + continue + } + crd, ok := obj.(*v1beta1.CustomResourceDefinition) + if !ok { + continue + } + + var y yaml.MapSlice + if err := yaml.Unmarshal(bs, &y); err != nil { + fmt.Printf("Warning: failed to unmarshal %q, skipping\n", info.Name()) + continue + } + key := crd.Spec.Names.Kind + "." 
+ crd.Spec.Group + ret[filepath.Join(dir, info.Name())] = KindGroupYaml{key, y} + } + if err != nil { + return nil, err + } + return ret, err +} diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go new file mode 100755 index 0000000000..228a800cca --- /dev/null +++ b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + "github.com/openshift/library-go/cmd/crd-schema-gen/generator" +) + +func main() { + if err := generator.Run(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock new file mode 100644 index 0000000000..483a42a5e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -0,0 +1,858 @@ +hash: 834b42ac04c13e26423b9cddffbd75a093f4d889cffb059911fae65aac364c7b +updated: 2019-04-17T10:52:09.03874547-04:00 +imports: +- name: bitbucket.org/ww/goautoneg + version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +- name: github.com/Azure/go-ansiterm + version: d6e3b3328b783f23731bc4d058875b0371ff8109 + subpackages: + - winterm +- name: github.com/beorn7/perks + version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 + subpackages: + - quantile +- name: github.com/blang/semver + version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 +- name: github.com/certifi/gocertifi + version: ee1a9a0726d2ae45f54118cac878c990d4016ded +- name: github.com/coreos/etcd + version: 27fc7e2296f506182f58ce846e48f36b34fe6842 + subpackages: + - alarm + - auth + - auth/authpb + - client + - clientv3 + - clientv3/concurrency + - clientv3/namespace + - clientv3/naming + - compactor + - discovery + - embed + - error + - etcdserver + - etcdserver/api + - etcdserver/api/etcdhttp + - etcdserver/api/v2http + - etcdserver/api/v2http/httptypes + - etcdserver/api/v2v3 + - etcdserver/api/v3client + - etcdserver/api/v3election + - etcdserver/api/v3election/v3electionpb + - etcdserver/api/v3election/v3electionpb/gw + - etcdserver/api/v3lock + - etcdserver/api/v3lock/v3lockpb + - etcdserver/api/v3lock/v3lockpb/gw + - etcdserver/api/v3rpc + - etcdserver/api/v3rpc/rpctypes + - etcdserver/auth + - etcdserver/etcdserverpb + - etcdserver/etcdserverpb/gw + - etcdserver/membership + - etcdserver/stats + - integration + - lease + - lease/leasehttp + - lease/leasepb + - mvcc + - mvcc/backend + - mvcc/mvccpb + - pkg/adt + - pkg/contention + - pkg/cors + - pkg/cpuutil + - pkg/crc + - pkg/debugutil + - pkg/fileutil + - pkg/httputil + - pkg/idutil + - pkg/ioutil + - pkg/logutil + - pkg/netutil + - pkg/pathutil + - pkg/pbutil + - pkg/runtime + - pkg/schedule + - pkg/srv + - pkg/testutil + - pkg/tlsutil + - pkg/transport + - pkg/types + - pkg/wait + - proxy/grpcproxy + - proxy/grpcproxy/adapter + - proxy/grpcproxy/cache + - raft + - raft/raftpb + - rafthttp + - snap + - snap/snappb + - store + - version + - wal + - wal/walpb +- name: github.com/coreos/go-systemd + version: 39ca1b05acc7ad1220e09f133283b8859a8b71ab + subpackages: + - daemon + - journal +- name: github.com/davecgh/go-spew + version: 782f4967f2dc4564575ca782fe2d04090b5faca8 + subpackages: + - spew +- name: github.com/docker/docker + version: a9fbbdc8dd8794b20af358382ab780559bca589d + subpackages: + - pkg/term + - pkg/term/windows +- name: github.com/elazarl/go-bindata-assetfs + version: 3dcc96556217539f50599357fb481ac0dc7439b9 +- name: github.com/emicklei/go-restful + version: 
ff4f55a206334ef123e4f79bbf348980da81ca46 + subpackages: + - log +- name: github.com/emicklei/go-restful-swagger12 + version: dcef7f55730566d41eae5db10e7d6981829720f6 +- name: github.com/evanphx/json-patch + version: 5858425f75500d40c52783dce87d085a483ce135 +- name: github.com/getsentry/raven-go + version: 32a13797442ccb601b11761d74232773c1402d14 +- name: github.com/ghodss/yaml + version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 +- name: github.com/go-openapi/jsonpointer + version: ef5f0afec364d3b9396b7b77b43dbe26bf1f8004 +- name: github.com/go-openapi/jsonreference + version: 8483a886a90412cd6858df4ea3483dce9c8e35a3 +- name: github.com/go-openapi/spec + version: 5bae59e25b21498baea7f9d46e9c147ec106a42e +- name: github.com/go-openapi/swag + version: 5899d5c5e619fda5fa86e14795a835f473ca284c +- name: github.com/gobuffalo/envy + version: 043cb4b8af871b49563291e32c66bb84378a60ac +- name: github.com/gogo/protobuf + version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 + subpackages: + - gogoproto + - proto + - protoc-gen-gogo/descriptor + - sortkeys +- name: github.com/golang/groupcache + version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 + subpackages: + - lru +- name: github.com/golang/protobuf + version: b4deda0973fb4c70b50d226b1af49f3da59f5265 + subpackages: + - jsonpb + - proto + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/struct + - ptypes/timestamp +- name: github.com/gonum/blas + version: f22b278b28ac9805aadd613a754a60c35b24ae69 + subpackages: + - blas64 + - native + - native/internal/math32 +- name: github.com/gonum/floats + version: c233463c7e827fd71a8cdb62dfda0e98f7c39ad5 +- name: github.com/gonum/graph + version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 + subpackages: + - encoding/dot + - formats/dot + - formats/dot/ast + - formats/dot/internal/astx + - formats/dot/internal/errors + - formats/dot/internal/lexer + - formats/dot/internal/parser + - formats/dot/internal/token + - internal/ordered + - simple +- name: github.com/gonum/internal + version: f884aa71402950fb2796dbea0d5aa9ef9cfad8ca + subpackages: + - asm/f32 + - asm/f64 +- name: github.com/gonum/lapack + version: e4cdc5a0bff924bb10be88482e635bd40429f65e + subpackages: + - lapack64 + - native +- name: github.com/gonum/matrix + version: c518dec07be9a636c38a4650e217be059b5952ec + subpackages: + - mat64 +- name: github.com/google/btree + version: 7d79101e329e5a3adf994758c578dab82b90c017 +- name: github.com/google/gofuzz + version: 44d81051d367757e1c7c6a5a86423ece9afcf63c +- name: github.com/googleapis/gnostic + version: 0c5108395e2debce0d731cf0287ddf7242066aba + subpackages: + - OpenAPIv2 + - compiler + - extensions +- name: github.com/gregjones/httpcache + version: 787624de3eb7bd915c329cba748687a3b22666a6 + subpackages: + - diskcache +- name: github.com/grpc-ecosystem/go-grpc-prometheus + version: 2500245aa6110c562d17020fb31a2c133d737799 +- name: github.com/hashicorp/golang-lru + version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 + subpackages: + - simplelru +- name: github.com/imdario/mergo + version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/joho/godotenv + version: 5c0e6c6ab1a0a9ef0a8822cba3a05d62f7dad941 +- name: github.com/json-iterator/go + version: ab8a2e0c74be9d3be70b3184d9acc634935ded82 +- name: github.com/jteeuwen/go-bindata + version: a0ff2567cfb70903282db057e799fd826784d41d +- name: github.com/mailru/easyjson + version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d + subpackages: + - buffer + - jlexer 
+ - jwriter +- name: github.com/markbates/inflect + version: d582c680dc4d29c2279628ae00e743005bfcd4fe +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil +- name: github.com/modern-go/concurrent + version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 +- name: github.com/modern-go/reflect2 + version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 +- name: github.com/NYTimes/gziphandler + version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb +- name: github.com/openshift/api + version: 7924f9106f8e132f4f33a0c7fb8841b49bfc2d83 + subpackages: + - apps + - apps/v1 + - authorization + - authorization/v1 + - build + - build/v1 + - config + - config/v1 + - image + - image/docker10 + - image/dockerpre012 + - image/v1 + - kubecontrolplane + - kubecontrolplane/v1 + - legacyconfig/v1 + - network + - network/v1 + - oauth + - oauth/v1 + - openshiftcontrolplane + - openshiftcontrolplane/v1 + - operator + - operator/v1 + - operator/v1alpha1 + - osin + - osin/v1 + - pkg/serialization + - project + - project/v1 + - quota + - quota/v1 + - route + - route/v1 + - security + - security/v1 + - servicecertsigner + - servicecertsigner/v1alpha1 + - template + - template/v1 + - user + - user/v1 + - webconsole + - webconsole/v1 +- name: github.com/openshift/client-go + version: 0255926f53935175fe90b8e7672c4c06c17d79e6 + subpackages: + - config/clientset/versioned + - config/clientset/versioned/fake + - config/clientset/versioned/scheme + - config/clientset/versioned/typed/config/v1 + - config/clientset/versioned/typed/config/v1/fake + - config/informers/externalversions/config/v1 + - config/informers/externalversions/internalinterfaces + - config/listers/config/v1 +- name: github.com/pborman/uuid + version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 +- name: github.com/peterbourgon/diskv + version: 5f041e8faa004a95c88a202771f4cc3e991971e6 +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pkg/profile + version: f6fe06335df110bcf1ed6d4e852b760bfc15beee +- name: github.com/prometheus/client_golang + version: e7e903064f5e9eb5da98208bae10b475d4db0f8c + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 + subpackages: + - go +- name: github.com/prometheus/common + version: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: 65c1f6f8f0fc1e2185eb9863a3bc751496404259 + subpackages: + - xfs +- name: github.com/PuerkitoBio/purell + version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 +- name: github.com/PuerkitoBio/urlesc + version: 5bd2802263f21d8788851d5305584c82a5c75d7e +- name: github.com/rogpeppe/go-internal + version: 438578804ca6f31be148c27683afc419ce47c06e + subpackages: + - modfile + - module + - semver +- name: github.com/sigma/go-inotify + version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 +- name: github.com/sirupsen/logrus + version: 89742aefa4b206dcf400792f3bd35b542998eb3b +- name: github.com/spf13/afero + version: 588a75ec4f32903aa5e39a2619ba6a4631e28424 + subpackages: + - mem +- name: github.com/spf13/cobra + version: c439c4fa093711d42e1b01acb1235b52004753c1 +- name: github.com/spf13/pflag + version: 583c0c0531f06d5278b7d917446061adc344b5cd +- name: golang.org/x/crypto + version: de0752318171da717af4ce24d0a2e8626afaeb11 + subpackages: + - bcrypt + - blowfish + - ssh/terminal +- name: golang.org/x/net + version: 
0ed95abb35c445290478a5348a7b38bb154135fd + subpackages: + - context + - http2 + - http2/hpack + - idna + - internal/timeseries + - lex/httplex + - trace + - websocket +- name: golang.org/x/oauth2 + version: a6bd8cefa1811bd24b86f8902872e4e8225f74c4 + subpackages: + - internal +- name: golang.org/x/sys + version: 95c6576299259db960f6c5b9b69ea52422860fce + subpackages: + - unix + - windows +- name: golang.org/x/text + version: b19bf474d317b857955b12035d2c5acb57ce8b01 + subpackages: + - cases + - internal + - internal/tag + - language + - runes + - secure/bidirule + - secure/precis + - transform + - unicode/bidi + - unicode/norm + - width +- name: golang.org/x/time + version: f51c12702a4d776e4c1fa9b0fabab841babae631 + subpackages: + - rate +- name: golang.org/x/tools + version: 2382e3994d48b1d22acc2c86bcad0a2aff028e32 + subpackages: + - container/intsets + - go/ast/astutil + - imports +- name: google.golang.org/appengine + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 + subpackages: + - internal + - internal/base + - internal/datastore + - internal/log + - internal/remote_api + - internal/urlfetch + - urlfetch +- name: google.golang.org/genproto + version: 09f6ed296fc66555a25fe4ce95173148778dfa85 + subpackages: + - googleapis/rpc/status +- name: google.golang.org/grpc + version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 + subpackages: + - balancer + - balancer/base + - balancer/roundrobin + - codes + - connectivity + - credentials + - encoding + - encoding/proto + - grpclog + - health + - health/grpc_health_v1 + - internal + - internal/backoff + - internal/channelz + - internal/grpcrand + - keepalive + - metadata + - naming + - peer + - resolver + - resolver/dns + - resolver/passthrough + - stats + - status + - tap + - transport +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/natefinch/lumberjack.v2 + version: 20b71e5b60d756d3d2f80def009790325acc2b23 +- name: gopkg.in/yaml.v2 + version: 51d6538a90f86fe93ac480b35f37b2be17fef232 +- name: k8s.io/api + version: 5cb15d34447165a97c76ed5a60e4e99c8a01ecfe + subpackages: + - admission/v1beta1 + - admissionregistration/v1alpha1 + - admissionregistration/v1beta1 + - apps/v1 + - apps/v1beta1 + - apps/v1beta2 + - auditregistration/v1alpha1 + - authentication/v1 + - authentication/v1beta1 + - authorization/v1 + - authorization/v1beta1 + - autoscaling/v1 + - autoscaling/v2beta1 + - autoscaling/v2beta2 + - batch/v1 + - batch/v1beta1 + - batch/v2alpha1 + - certificates/v1beta1 + - coordination/v1beta1 + - core/v1 + - events/v1beta1 + - extensions/v1beta1 + - imagepolicy/v1alpha1 + - networking/v1 + - policy/v1beta1 + - rbac/v1 + - rbac/v1alpha1 + - rbac/v1beta1 + - scheduling/v1alpha1 + - scheduling/v1beta1 + - settings/v1alpha1 + - storage/v1 + - storage/v1alpha1 + - storage/v1beta1 +- name: k8s.io/apiextensions-apiserver + version: 3c74db8dd172051b029f91536c681a1b43694809 + repo: https://github.com/openshift/kubernetes-apiextensions-apiserver + subpackages: + - pkg/apis/apiextensions + - pkg/apis/apiextensions/v1beta1 + - pkg/client/clientset/clientset/scheme + - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +- name: k8s.io/apimachinery + version: 86fb29eff6288413d76bd8506874fddd9fccdff0 + subpackages: + - pkg/api/equality + - pkg/api/errors + - pkg/api/meta + - pkg/api/resource + - pkg/api/validation + - pkg/api/validation/path + - pkg/apis/meta/internalversion + - pkg/apis/meta/v1 + - pkg/apis/meta/v1/unstructured + - pkg/apis/meta/v1/validation + - pkg/apis/meta/v1beta1 + - pkg/conversion + - 
pkg/conversion/queryparams + - pkg/fields + - pkg/labels + - pkg/runtime + - pkg/runtime/schema + - pkg/runtime/serializer + - pkg/runtime/serializer/json + - pkg/runtime/serializer/protobuf + - pkg/runtime/serializer/recognizer + - pkg/runtime/serializer/streaming + - pkg/runtime/serializer/versioning + - pkg/selection + - pkg/types + - pkg/util/cache + - pkg/util/clock + - pkg/util/diff + - pkg/util/errors + - pkg/util/framer + - pkg/util/intstr + - pkg/util/json + - pkg/util/mergepatch + - pkg/util/naming + - pkg/util/net + - pkg/util/rand + - pkg/util/runtime + - pkg/util/sets + - pkg/util/strategicpatch + - pkg/util/uuid + - pkg/util/validation + - pkg/util/validation/field + - pkg/util/wait + - pkg/util/waitgroup + - pkg/util/yaml + - pkg/version + - pkg/watch + - third_party/forked/golang/json + - third_party/forked/golang/reflect +- name: k8s.io/apiserver + version: 79427f02047f9189a75b8cdaadccaf65a126853e + subpackages: + - pkg/admission + - pkg/admission/configuration + - pkg/admission/initializer + - pkg/admission/metrics + - pkg/admission/plugin/initialization + - pkg/admission/plugin/namespace/lifecycle + - pkg/admission/plugin/webhook/config + - pkg/admission/plugin/webhook/config/apis/webhookadmission + - pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 + - pkg/admission/plugin/webhook/errors + - pkg/admission/plugin/webhook/generic + - pkg/admission/plugin/webhook/mutating + - pkg/admission/plugin/webhook/namespace + - pkg/admission/plugin/webhook/request + - pkg/admission/plugin/webhook/rules + - pkg/admission/plugin/webhook/util + - pkg/admission/plugin/webhook/validating + - pkg/apis/apiserver + - pkg/apis/apiserver/install + - pkg/apis/apiserver/v1alpha1 + - pkg/apis/audit + - pkg/apis/audit/install + - pkg/apis/audit/v1 + - pkg/apis/audit/v1alpha1 + - pkg/apis/audit/v1beta1 + - pkg/apis/audit/validation + - pkg/audit + - pkg/audit/event + - pkg/audit/policy + - pkg/audit/util + - pkg/authentication/authenticator + - pkg/authentication/authenticatorfactory + - pkg/authentication/group + - pkg/authentication/request/anonymous + - pkg/authentication/request/bearertoken + - pkg/authentication/request/headerrequest + - pkg/authentication/request/union + - pkg/authentication/request/websocket + - pkg/authentication/request/x509 + - pkg/authentication/serviceaccount + - pkg/authentication/token/cache + - pkg/authentication/token/tokenfile + - pkg/authentication/user + - pkg/authorization/authorizer + - pkg/authorization/authorizerfactory + - pkg/authorization/path + - pkg/authorization/union + - pkg/endpoints + - pkg/endpoints/discovery + - pkg/endpoints/filters + - pkg/endpoints/handlers + - pkg/endpoints/handlers/negotiation + - pkg/endpoints/handlers/responsewriters + - pkg/endpoints/metrics + - pkg/endpoints/openapi + - pkg/endpoints/request + - pkg/features + - pkg/registry/generic + - pkg/registry/generic/registry + - pkg/registry/rest + - pkg/server + - pkg/server/filters + - pkg/server/healthz + - pkg/server/httplog + - pkg/server/mux + - pkg/server/options + - pkg/server/resourceconfig + - pkg/server/routes + - pkg/server/routes/data/swagger + - pkg/server/storage + - pkg/storage + - pkg/storage/cacher + - pkg/storage/errors + - pkg/storage/etcd + - pkg/storage/etcd/metrics + - pkg/storage/etcd3 + - pkg/storage/names + - pkg/storage/storagebackend + - pkg/storage/storagebackend/factory + - pkg/storage/value + - pkg/util/dryrun + - pkg/util/feature + - pkg/util/flag + - pkg/util/flushwriter + - pkg/util/logs + - pkg/util/openapi + - pkg/util/trace + 
- pkg/util/webhook + - pkg/util/wsstream + - plugin/pkg/audit/buffered + - plugin/pkg/audit/dynamic + - plugin/pkg/audit/dynamic/enforced + - plugin/pkg/audit/log + - plugin/pkg/audit/truncate + - plugin/pkg/audit/webhook + - plugin/pkg/authenticator/token/webhook + - plugin/pkg/authorizer/webhook +- name: k8s.io/client-go + version: b40b2a5939e43f7ffe0028ad67586b7ce50bb675 + subpackages: + - discovery + - discovery/fake + - dynamic + - dynamic/fake + - informers + - informers/admissionregistration + - informers/admissionregistration/v1alpha1 + - informers/admissionregistration/v1beta1 + - informers/apps + - informers/apps/v1 + - informers/apps/v1beta1 + - informers/apps/v1beta2 + - informers/auditregistration + - informers/auditregistration/v1alpha1 + - informers/autoscaling + - informers/autoscaling/v1 + - informers/autoscaling/v2beta1 + - informers/autoscaling/v2beta2 + - informers/batch + - informers/batch/v1 + - informers/batch/v1beta1 + - informers/batch/v2alpha1 + - informers/certificates + - informers/certificates/v1beta1 + - informers/coordination + - informers/coordination/v1beta1 + - informers/core + - informers/core/v1 + - informers/events + - informers/events/v1beta1 + - informers/extensions + - informers/extensions/v1beta1 + - informers/internalinterfaces + - informers/networking + - informers/networking/v1 + - informers/policy + - informers/policy/v1beta1 + - informers/rbac + - informers/rbac/v1 + - informers/rbac/v1alpha1 + - informers/rbac/v1beta1 + - informers/scheduling + - informers/scheduling/v1alpha1 + - informers/scheduling/v1beta1 + - informers/settings + - informers/settings/v1alpha1 + - informers/storage + - informers/storage/v1 + - informers/storage/v1alpha1 + - informers/storage/v1beta1 + - kubernetes + - kubernetes/fake + - kubernetes/scheme + - kubernetes/typed/admissionregistration/v1alpha1 + - kubernetes/typed/admissionregistration/v1alpha1/fake + - kubernetes/typed/admissionregistration/v1beta1 + - kubernetes/typed/admissionregistration/v1beta1/fake + - kubernetes/typed/apps/v1 + - kubernetes/typed/apps/v1/fake + - kubernetes/typed/apps/v1beta1 + - kubernetes/typed/apps/v1beta1/fake + - kubernetes/typed/apps/v1beta2 + - kubernetes/typed/apps/v1beta2/fake + - kubernetes/typed/auditregistration/v1alpha1 + - kubernetes/typed/auditregistration/v1alpha1/fake + - kubernetes/typed/authentication/v1 + - kubernetes/typed/authentication/v1/fake + - kubernetes/typed/authentication/v1beta1 + - kubernetes/typed/authentication/v1beta1/fake + - kubernetes/typed/authorization/v1 + - kubernetes/typed/authorization/v1/fake + - kubernetes/typed/authorization/v1beta1 + - kubernetes/typed/authorization/v1beta1/fake + - kubernetes/typed/autoscaling/v1 + - kubernetes/typed/autoscaling/v1/fake + - kubernetes/typed/autoscaling/v2beta1 + - kubernetes/typed/autoscaling/v2beta1/fake + - kubernetes/typed/autoscaling/v2beta2 + - kubernetes/typed/autoscaling/v2beta2/fake + - kubernetes/typed/batch/v1 + - kubernetes/typed/batch/v1/fake + - kubernetes/typed/batch/v1beta1 + - kubernetes/typed/batch/v1beta1/fake + - kubernetes/typed/batch/v2alpha1 + - kubernetes/typed/batch/v2alpha1/fake + - kubernetes/typed/certificates/v1beta1 + - kubernetes/typed/certificates/v1beta1/fake + - kubernetes/typed/coordination/v1beta1 + - kubernetes/typed/coordination/v1beta1/fake + - kubernetes/typed/core/v1 + - kubernetes/typed/core/v1/fake + - kubernetes/typed/events/v1beta1 + - kubernetes/typed/events/v1beta1/fake + - kubernetes/typed/extensions/v1beta1 + - kubernetes/typed/extensions/v1beta1/fake + - 
kubernetes/typed/networking/v1 + - kubernetes/typed/networking/v1/fake + - kubernetes/typed/policy/v1beta1 + - kubernetes/typed/policy/v1beta1/fake + - kubernetes/typed/rbac/v1 + - kubernetes/typed/rbac/v1/fake + - kubernetes/typed/rbac/v1alpha1 + - kubernetes/typed/rbac/v1alpha1/fake + - kubernetes/typed/rbac/v1beta1 + - kubernetes/typed/rbac/v1beta1/fake + - kubernetes/typed/scheduling/v1alpha1 + - kubernetes/typed/scheduling/v1alpha1/fake + - kubernetes/typed/scheduling/v1beta1 + - kubernetes/typed/scheduling/v1beta1/fake + - kubernetes/typed/settings/v1alpha1 + - kubernetes/typed/settings/v1alpha1/fake + - kubernetes/typed/storage/v1 + - kubernetes/typed/storage/v1/fake + - kubernetes/typed/storage/v1alpha1 + - kubernetes/typed/storage/v1alpha1/fake + - kubernetes/typed/storage/v1beta1 + - kubernetes/typed/storage/v1beta1/fake + - listers/admissionregistration/v1alpha1 + - listers/admissionregistration/v1beta1 + - listers/apps/v1 + - listers/apps/v1beta1 + - listers/apps/v1beta2 + - listers/auditregistration/v1alpha1 + - listers/autoscaling/v1 + - listers/autoscaling/v2beta1 + - listers/autoscaling/v2beta2 + - listers/batch/v1 + - listers/batch/v1beta1 + - listers/batch/v2alpha1 + - listers/certificates/v1beta1 + - listers/coordination/v1beta1 + - listers/core/v1 + - listers/events/v1beta1 + - listers/extensions/v1beta1 + - listers/networking/v1 + - listers/policy/v1beta1 + - listers/rbac/v1 + - listers/rbac/v1alpha1 + - listers/rbac/v1beta1 + - listers/scheduling/v1alpha1 + - listers/scheduling/v1beta1 + - listers/settings/v1alpha1 + - listers/storage/v1 + - listers/storage/v1alpha1 + - listers/storage/v1beta1 + - pkg/apis/clientauthentication + - pkg/apis/clientauthentication/v1alpha1 + - pkg/apis/clientauthentication/v1beta1 + - pkg/version + - plugin/pkg/client/auth/exec + - rest + - rest/watch + - restmapper + - testing + - tools/auth + - tools/cache + - tools/clientcmd + - tools/clientcmd/api + - tools/clientcmd/api/latest + - tools/clientcmd/api/v1 + - tools/leaderelection + - tools/leaderelection/resourcelock + - tools/metrics + - tools/pager + - tools/record + - tools/reference + - transport + - util/buffer + - util/cert + - util/connrotation + - util/flowcontrol + - util/homedir + - util/integer + - util/retry + - util/workqueue +- name: k8s.io/gengo + version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 + subpackages: + - args + - generator + - namer + - parser + - types +- name: k8s.io/klog + version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f +- name: k8s.io/kube-aggregator + version: 3e0149950b0e22a3b8579db52bd50e40d0dac10e + subpackages: + - pkg/apis/apiregistration + - pkg/apis/apiregistration/v1 + - pkg/apis/apiregistration/v1beta1 + - pkg/client/clientset_generated/clientset/scheme + - pkg/client/clientset_generated/clientset/typed/apiregistration/v1 +- name: k8s.io/kube-openapi + version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d + subpackages: + - pkg/aggregator + - pkg/builder + - pkg/common + - pkg/handler + - pkg/util + - pkg/util/proto +- name: sigs.k8s.io/controller-tools + version: 4e23e49e5d401ca6ced86aa30262d0cf2488c504 + repo: https://github.com/openshift/kubernetes-sigs-controller-tools + subpackages: + - pkg/crd/generator + - pkg/crd/util + - pkg/internal/codegen + - pkg/internal/codegen/parse + - pkg/internal/general + - pkg/util +- name: sigs.k8s.io/yaml + version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 +testImports: +- name: vbom.ml/util + version: efcd4e0f97874370259c7d93e12aad57911dea81 + subpackages: + - sortorder diff --git 
a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml
new file mode 100644
index 0000000000..8f5129a9b3
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/glide.yaml
@@ -0,0 +1,59 @@
+package: github.com/openshift/library-go
+import:
+- package: k8s.io/apimachinery
+  version: kubernetes-1.13.4
+- package: k8s.io/api
+  version: kubernetes-1.13.4
+- package: k8s.io/apiserver
+  version: kubernetes-1.13.4
+- package: k8s.io/kube-aggregator
+  version: kubernetes-1.13.4
+- package: k8s.io/client-go
+  version: kubernetes-1.13.4
+- package: github.com/openshift/api
+  version: master
+- package: github.com/openshift/client-go
+  version: master
+
+# crd-schema-gen
+  # TODO: we need this to get the nullable patch, but we will replace this with a new repo soon.
+- package: k8s.io/apiextensions-apiserver
+  repo: https://github.com/openshift/kubernetes-apiextensions-apiserver
+  version: origin-4.1-kubernetes-1.13.4
+- package: sigs.k8s.io/controller-tools
+  repo: https://github.com/openshift/kubernetes-sigs-controller-tools
+  version: origin-4.1-kubernetes-1.13.4
+- package: k8s.io/gengo
+  version: e17681d19d3ac4837a019ece36c2a0ec31ffe985
+
+# sig-master - needed for file observer
+- package: github.com/sigma/go-inotify
+  version: c87b6cf5033d2c6486046f045eeebdc3d910fd38
+# sig-master
+- package: github.com/getsentry/raven-go
+  version: 32a13797442ccb601b11761d74232773c1402d14
+# sig-master - transitive through raven-go, this matches the kube level
+- package: github.com/pkg/errors
+  version: v0.8.0
+# sig-master - transitive through raven-go, this is the level we had when we noticed
+- package: github.com/certifi/gocertifi
+  version: ee1a9a0726d2ae45f54118cac878c990d4016ded
+- package: github.com/jteeuwen/go-bindata
+  version: a0ff2567cfb70903282db057e799fd826784d41d
+
+# matches kube
+- package: github.com/spf13/cobra
+  version: c439c4fa093711d42e1b01acb1235b52004753c1
+- package: github.com/spf13/pflag
+  version: 583c0c0531f06d5278b7d917446061adc344b5cd
+- package: github.com/sirupsen/logrus
+  version: 89742aefa4b206dcf400792f3bd35b542998eb3b
+- package: github.com/blang/semver
+  version: b38d23b8782a487059e8fc8773e9a5b228a77cb6
+- package: github.com/imdario/mergo
+  version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58
+
+
+# matches openshift/origin
+- package: github.com/gonum/graph
+  version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796
diff --git a/vendor/github.com/openshift/library-go/pkg/assets/assets.go b/vendor/github.com/openshift/library-go/pkg/assets/assets.go
new file mode 100644
index 0000000000..5c26928676
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/assets/assets.go
@@ -0,0 +1,150 @@
+package assets
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+)
+
+type Permission os.FileMode
+
+const (
+	PermissionDirectoryDefault Permission = 0755
+	PermissionFileDefault Permission = 0644
+	PermissionFileRestricted Permission = 0600
+)
+
+// Asset defines a single static asset.
+type Asset struct {
+	Name string
+	FilePermission Permission
+	Data []byte
+}
+
+// Assets is a list of assets.
+type Assets []Asset
+
+// New walks through a directory recursively and renders each file as an asset. Only those files
+// that satisfy all predicates are rendered.
+func New(dir string, data interface{}, predicates ...FileInfoPredicate) (Assets, error) {
+	files, err := LoadFilesRecursively(dir, predicates...)
+	if err != nil {
+		return nil, err
+	}
+
+	var as Assets
+	var errs []error
+	for path, bs := range files {
+		a, err := assetFromTemplate(path, bs, data)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to render %q: %v", path, err))
+			continue
+		}
+
+		as = append(as, *a)
+	}
+
+	if len(errs) > 0 {
+		return nil, errors.NewAggregate(errs)
+	}
+
+	return as, nil
+}
+
+// WriteFiles writes the assets to the specified path.
+func (as Assets) WriteFiles(path string) error {
+	if err := os.MkdirAll(path, os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	for _, asset := range as {
+		if _, err := os.Stat(path); os.IsExist(err) {
+			fmt.Printf("WARNING: File %s already exists, content will be replaced\n", path)
+		}
+		if err := asset.WriteFile(path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WriteFile writes a single asset into the specified path.
+func (a Asset) WriteFile(path string) error {
+	f := filepath.Join(path, a.Name)
+	perms := PermissionFileDefault
+	if err := os.MkdirAll(filepath.Dir(f), os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	if a.FilePermission != 0 {
+		perms = a.FilePermission
+	}
+	fmt.Printf("Writing asset: %s\n", f)
+	return ioutil.WriteFile(f, a.Data, os.FileMode(perms))
+}
+
+// MustCreateAssetFromTemplate processes the given template with the given config and returns an asset; it panics if rendering fails.
+func MustCreateAssetFromTemplate(name string, template []byte, config interface{}) Asset {
+	asset, err := assetFromTemplate(name, template, config)
+	if err != nil {
+		panic(err)
+	}
+	return *asset
+}
+
+func assetFromTemplate(name string, tb []byte, data interface{}) (*Asset, error) {
+	bs, err := renderFile(name, tb, data)
+	if err != nil {
+		return nil, err
+	}
+	return &Asset{Name: name, Data: bs}, nil
+}
+
+type FileInfoPredicate func(os.FileInfo) bool
+
+// OnlyYaml is a predicate for LoadFilesRecursively that filters out non-YAML files.
+func OnlyYaml(info os.FileInfo) bool {
+	return strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), ".yml")
+}
+
+// LoadFilesRecursively returns a map from relative path names to file content.
+func LoadFilesRecursively(dir string, predicates ...FileInfoPredicate) (map[string][]byte, error) { + files := map[string][]byte{} + err := filepath.Walk(dir, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + for _, p := range predicates { + if !p(info) { + return nil + } + } + + bs, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + // make path relative to dir + rel, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + files[rel] = bs + return nil + }, + ) + if err != nil { + return nil, err + } + + return files, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS b/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS new file mode 100644 index 0000000000..f9d8e59e4b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - mfojtik +approvers: + - mfojtik \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go new file mode 100644 index 0000000000..cec47ed26a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -0,0 +1,248 @@ +package create + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "strings" + "time" + + "github.com/ghodss/yaml" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + + "github.com/openshift/library-go/pkg/assets" +) + +// CreateOptions allow to specify additional create options. +type CreateOptions struct { + // Filters allows to filter which files we will read from disk. + // Multiple filters can be specified, in that case only files matching all filters will be returned. + Filters []assets.FileInfoPredicate + + // Verbose if true will print out extra messages for debugging + Verbose bool + + // StdErr allows to override the standard error output for printing verbose messages. + // If not set, os.StdErr is used. + StdErr io.Writer +} + +// EnsureManifestsCreated ensures that all resource manifests from the specified directory are created. +// This function will try to create remaining resources in the manifest list after error is occurred. +// This function will keep retrying creation until no errors are reported or the timeout is hit. +// Pass the context to indicate how much time you are willing to wait until all resources are created. +func EnsureManifestsCreated(ctx context.Context, manifestDir string, restConfig *rest.Config, options CreateOptions) error { + client, dc, err := newClientsFn(restConfig) + if err != nil { + return err + } + + manifests, err := load(manifestDir, options) + if err != nil { + return err + } + + if options.Verbose && options.StdErr == nil { + options.StdErr = os.Stderr + } + + // Default QPS in client (when not specified) is 5 requests/per second + // This specifies the interval between "create-all-resources", no need to make this configurable. + interval := 200 * time.Millisecond + + // Retry creation until no errors are returned or the timeout is hit. 
+ var ( + lastCreateError error + retryCount int + mapper meta.RESTMapper + needDiscoveryRefresh bool = true + ) + err = wait.PollImmediateUntil(interval, func() (bool, error) { + retryCount++ + // If we get rest mapper error, we need to pull updated discovery info from API server + if needDiscoveryRefresh { + mapper, err = fetchLatestDiscoveryInfoFn(dc) + if err != nil { + if options.Verbose { + fmt.Fprintf(options.StdErr, "[#%d] failed to fetch discovery: %s\n", retryCount, err) + } + return false, nil + } + } + err, needDiscoveryRefresh = create(ctx, manifests, client, mapper, options) + if err == nil { + lastCreateError = nil + return true, nil + } + if ctx.Err() == nil || lastCreateError == nil { + lastCreateError = err + } + if options.Verbose { + fmt.Fprintf(options.StdErr, "[#%d] %s\n", retryCount, err) + } + return false, nil + }, ctx.Done()) + + // Return the last observed set of errors from the create process instead of timeout error. + if lastCreateError != nil { + return lastCreateError + } + + return err +} + +// allow to override in unit test +var newClientsFn = newClients + +func newClients(config *rest.Config) (dynamic.Interface, *discovery.DiscoveryClient, error) { + client, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + + // TODO: We can use cacheddiscovery.NewMemCacheClient(dc) and then call .Invalidate() instead of fetchLatestDiscoveryInfo. + // It will require more work in unit test though. + dc, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, nil, err + } + + return client, dc, nil +} + +// allow to override in unit test +var fetchLatestDiscoveryInfoFn = fetchLatestDiscoveryInfo + +func fetchLatestDiscoveryInfo(dc *discovery.DiscoveryClient) (meta.RESTMapper, error) { + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// create will attempt to create all manifests provided using dynamic client. +// It will mutate the manifests argument in case the create succeeded for given manifest. When all manifests are successfully created the resulting +// manifests argument should be empty. +func create(ctx context.Context, manifests map[string]*unstructured.Unstructured, client dynamic.Interface, mapper meta.RESTMapper, options CreateOptions) (error, bool) { + sortedManifestPaths := []string{} + for key := range manifests { + sortedManifestPaths = append(sortedManifestPaths, key) + } + sort.Strings(sortedManifestPaths) + + // Record all errors for the given manifest path (so when we report errors, users can see what manifest failed). + errs := map[string]error{} + + // In case we fail to find a rest-mapping for the resource, force to fetch the updated discovery on next run. 
+ reloadDiscovery := false + + for _, path := range sortedManifestPaths { + select { + case <-ctx.Done(): + return ctx.Err(), false + default: + } + + gvk := manifests[path].GetObjectKind().GroupVersionKind() + mappings, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + errs[path] = fmt.Errorf("unable to get REST mapping for %q: %v", path, err) + reloadDiscovery = true + continue + } + + if mappings.Scope.Name() == meta.RESTScopeNameRoot { + _, err = client.Resource(mappings.Resource).Create(manifests[path], metav1.CreateOptions{}) + } else { + _, err = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()).Create(manifests[path], metav1.CreateOptions{}) + } + + resourceString := mappings.Resource.Resource + "." + mappings.Resource.Version + "." + mappings.Resource.Group + "/" + manifests[path].GetName() + " -n " + manifests[path].GetNamespace() + + // Resource already exists means we already succeeded + // This should never happen as we remove already created items from the manifest list, unless the resource existed beforehand. + if kerrors.IsAlreadyExists(err) { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Skipped %q %s as it already exists\n", path, resourceString) + } + delete(manifests, path) + continue + } + + if err != nil { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Failed to create %q %s: %v\n", path, resourceString, err) + } + errs[path] = fmt.Errorf("failed to create: %v", err) + continue + } + + if options.Verbose { + fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) + } + + // Creation succeeded lets remove the manifest from the list to avoid creating it second time + delete(manifests, path) + } + + return formatErrors("failed to create some manifests", errs), reloadDiscovery +} + +func formatErrors(prefix string, errors map[string]error) error { + if len(errors) == 0 { + return nil + } + aggregatedErrMessages := []string{} + keys := []string{} + for key := range errors { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, k := range keys { + aggregatedErrMessages = append(aggregatedErrMessages, fmt.Sprintf("%q: %v", k, errors[k])) + } + return fmt.Errorf("%s:\n%s", prefix, strings.Join(aggregatedErrMessages, "\n")) +} + +func load(assetsDir string, options CreateOptions) (map[string]*unstructured.Unstructured, error) { + manifests := map[string]*unstructured.Unstructured{} + manifestsBytesMap, err := assets.LoadFilesRecursively(assetsDir, options.Filters...) 
+ if err != nil { + return nil, err + } + + errs := map[string]error{} + for manifestPath, manifestBytes := range manifestsBytesMap { + manifestJSON, err := yaml.YAMLToJSON(manifestBytes) + if err != nil { + errs[manifestPath] = fmt.Errorf("unable to convert asset %q from YAML to JSON: %v", manifestPath, err) + continue + } + manifestObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, manifestJSON) + if err != nil { + errs[manifestPath] = fmt.Errorf("unable to decode asset %q: %v", manifestPath, err) + continue + } + manifestUnstructured, ok := manifestObj.(*unstructured.Unstructured) + if !ok { + errs[manifestPath] = fmt.Errorf("unable to convert asset %q to unstructed", manifestPath) + continue + } + manifests[manifestPath] = manifestUnstructured + } + + return manifests, formatErrors("failed to load some manifests", errs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000..bea8b9a596 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubeapiserveroperatorconfigs.kubeapiserver.operator.openshift.io +spec: + scope: Cluster + group: kubeapiserver.operator.openshift.io + version: v1alpha1 + names: + kind: KubeAPIServerOperatorConfig + plural: kubeapiserveroperatorconfigs + singular: kubeapiserveroperatorconfig + categories: + - coreoperators + subresources: + status: {} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml new file mode 100644 index 0000000000..d208ba4ee7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-kube-apiserver + labels: + openshift.io/run-level: "0" \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml new file mode 100644 index 0000000000..c3f63e8c7d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: aggregator-client-ca + namespace: openshift-kube-apiserver +data: + ca-bundle.crt: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml new file mode 100644 index 0000000000..fafd307b35 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml @@ -0,0 +1,6 @@ +apiVersion: kubeapiserver.operator.openshift.io/v1alpha1 +kind: KubeAPIServerOperatorConfig +metadata: + name: instance +spec: + managementState: Managed diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml 
new file mode 100644 index 0000000000..9b8da64a0c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: aggregator-client + namespace: openshift-kube-apiserver +type: SecretTypeTLS +data: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/template.go b/vendor/github.com/openshift/library-go/pkg/assets/template.go new file mode 100644 index 0000000000..7854392203 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/template.go @@ -0,0 +1,78 @@ +package assets + +import ( + "bytes" + "encoding/base64" + "strings" + "text/template" + "time" + + "k8s.io/client-go/util/cert" +) + +var templateFuncs = map[string]interface{}{ + "notAfter": notAfter, + "notBefore": notBefore, + "issuer": issuer, + "base64": base64encode, + "indent": indent, + "load": load, +} + +func indent(indention int, v []byte) string { + newline := "\n" + strings.Repeat(" ", indention) + return strings.Replace(string(v), "\n", newline, -1) +} + +func base64encode(v []byte) string { + return base64.StdEncoding.EncodeToString(v) +} + +func notAfter(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotAfter.Format(time.RFC3339) +} + +func notBefore(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotBefore.Format(time.RFC3339) +} + +func issuer(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].Issuer.CommonName +} + +func load(n string, assets map[string][]byte) []byte { + return assets[n] +} + +func renderFile(name string, tb []byte, data interface{}) ([]byte, error) { + tmpl, err := template.New(name).Funcs(templateFuncs).Parse(string(tb)) + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/util.go b/vendor/github.com/openshift/library-go/pkg/certs/util.go new file mode 100644 index 0000000000..5ec6354a50 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/certs/util.go @@ -0,0 +1,70 @@ +package certs + +import ( + "crypto/x509" + "fmt" + "strings" + "time" +) + +const defaultOutputTimeFormat = "Jan 2 15:04:05 2006" + +// nowFn is used in unit test to freeze time. +var nowFn = time.Now().UTC + +// CertificateToString converts a certificate into a human readable string. +// This function should guarantee consistent output format for must-gather tooling and any code +// that prints the certificate details. 
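A hedged sketch of how CertificateToString below might be used (the CA bundle path is an assumption for the example): parse a PEM bundle with client-go's cert helpers and print one summary line per certificate.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"k8s.io/client-go/util/cert"

	"github.com/openshift/library-go/pkg/certs"
)

func main() {
	// Read a PEM-encoded CA bundle from disk; the path is only an example.
	pemBytes, err := ioutil.ReadFile("/var/run/configmaps/client-ca/ca-bundle.crt")
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := cert.ParseCertsPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	// Print the human-readable summary for each certificate in the bundle.
	for _, c := range parsed {
		fmt.Println(certs.CertificateToString(c))
	}
}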
+func CertificateToString(certificate *x509.Certificate) string { + humanName := certificate.Subject.CommonName + signerHumanName := certificate.Issuer.CommonName + + if certificate.Subject.CommonName == certificate.Issuer.CommonName { + signerHumanName = "" + } + + usages := []string{} + for _, curr := range certificate.ExtKeyUsage { + if curr == x509.ExtKeyUsageClientAuth { + usages = append(usages, "client") + continue + } + if curr == x509.ExtKeyUsageServerAuth { + usages = append(usages, "serving") + continue + } + + usages = append(usages, fmt.Sprintf("%d", curr)) + } + + validServingNames := []string{} + for _, ip := range certificate.IPAddresses { + validServingNames = append(validServingNames, ip.String()) + } + for _, dnsName := range certificate.DNSNames { + validServingNames = append(validServingNames, dnsName) + } + + servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, + servingString, signerHumanName, certificate.NotBefore.UTC().Format(defaultOutputTimeFormat), + certificate.NotAfter.UTC().Format(defaultOutputTimeFormat), nowFn().Format(defaultOutputTimeFormat)) +} + +// CertificateBundleToString converts a certificate bundle into a human readable string. +func CertificateBundleToString(bundle []*x509.Certificate) string { + output := []string{} + for i, cert := range bundle { + output = append(output, fmt.Sprintf("[#%d]: %s", i, CertificateToString(cert))) + } + return strings.Join(output, "\n") +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go new file mode 100644 index 0000000000..a247311057 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go @@ -0,0 +1,131 @@ +package client + +import ( + "io/ioutil" + "net" + "net/http" + "time" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" +) + +// GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, +// then applies overrides. 
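A minimal sketch of the helper described above, assuming the KUBECONFIG environment variable as the optional file source and an arbitrary MaxIdleConnsPerHost override:

package main

import (
	"log"
	"os"

	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/config/client"
)

func main() {
	// Use the file named by KUBECONFIG when set, otherwise fall back to the
	// in-cluster service account configuration.
	cfg, err := client.GetKubeConfigOrInClusterConfig(os.Getenv("KUBECONFIG"), &client.ClientConnectionOverrides{MaxIdleConnsPerHost: 20})
	if err != nil {
		log.Fatal(err)
	}
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = kubeClient // build informers, controllers, etc. from here
}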
+func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) { + if len(kubeConfigFile) > 0 { + return GetClientConfig(kubeConfigFile, overrides) + } + + clientConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + applyClientConnectionOverrides(overrides, clientConfig) + + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} + if overrides != nil { + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + } + clientConfig.WrapTransport = t.DefaultClientTransport + + return clientConfig, nil +} + +// GetClientConfig returns the rest.Config for a kubeconfig file +func GetClientConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) { + kubeConfigBytes, err := ioutil.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + + t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport} + if overrides != nil { + t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost + } + clientConfig.WrapTransport = t.DefaultClientTransport + + return clientConfig, nil +} + +// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config. +func applyClientConnectionOverrides(overrides *ClientConnectionOverrides, kubeConfig *rest.Config) { + if overrides == nil { + return + } + if overrides.QPS > 0 { + kubeConfig.QPS = overrides.QPS + } + if overrides.Burst > 0 { + kubeConfig.Burst = int(overrides.Burst) + } + if len(overrides.AcceptContentTypes) > 0 { + kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes + } + if len(overrides.ContentType) > 0 { + kubeConfig.ContentConfig.ContentType = overrides.ContentType + } + + // TODO both of these default values look wrong + // if we have no preferences at this point, claim that we accept both proto and json. We will get proto if the server supports it. + // this is a slightly niggly thing. If the server has proto and our client does not (possible, but not super likely) then this fails. + if len(kubeConfig.ContentConfig.AcceptContentTypes) == 0 { + kubeConfig.ContentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + } + if len(kubeConfig.ContentConfig.ContentType) == 0 { + kubeConfig.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf" + } +} + +type ClientTransportOverrides struct { + WrapTransport func(rt http.RoundTripper) http.RoundTripper + MaxIdleConnsPerHost int +} + +// defaultClientTransport sets defaults for a client Transport that are suitable for use by infrastructure components. +func (c ClientTransportOverrides) DefaultClientTransport(rt http.RoundTripper) http.RoundTripper { + transport, ok := rt.(*http.Transport) + if !ok { + return rt + } + + transport.DialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext + + // Hold open more internal idle connections + transport.MaxIdleConnsPerHost = 100 + if c.MaxIdleConnsPerHost > 0 { + transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost + } + + if c.WrapTransport == nil { + return transport + + } + return c.WrapTransport(transport) +} + +// ClientConnectionOverrides allows overriding values for rest.Config not held in a kubeconfig. 
Most commonly used +// for QPS. Empty values are not used. +type ClientConnectionOverrides struct { + configv1.ClientConnectionOverrides + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle (keep-alive) connections to keep per-host:port. + // If zero, DefaultMaxIdleConnsPerHost is used. + // TODO roll this into the connection overrides in api + MaxIdleConnsPerHost int +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go new file mode 100644 index 0000000000..c2ddfd9956 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go @@ -0,0 +1,140 @@ +package v1helpers + +import ( + "bytes" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" + + configv1 "github.com/openshift/api/config/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +func SetStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. +func RemoveStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + newConditions := []configv1.ClusterOperatorStatusCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +// FindStatusCondition finds the conditionType in conditions. +func FindStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// GetStatusDiff returns a string representing change in condition status in human readable form. 
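To show how SetStatusCondition and GetStatusDiff fit together, a small sketch (the condition reason and message are made up for the example):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"

	"github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
)

func main() {
	oldStatus := configv1.ClusterOperatorStatus{}
	newStatus := configv1.ClusterOperatorStatus{}

	// Mark the operator Available; the helper sets LastTransitionTime for new
	// conditions and only bumps it when the status actually flips.
	v1helpers.SetStatusCondition(&newStatus.Conditions, configv1.ClusterOperatorStatusCondition{
		Type:    configv1.OperatorAvailable,
		Status:  configv1.ConditionTrue,
		Reason:  "AsExpected",
		Message: "all replicas are ready",
	})

	// Human-readable description of what changed between the two statuses.
	fmt.Println(v1helpers.GetStatusDiff(oldStatus, newStatus))
}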
+func GetStatusDiff(oldStatus configv1.ClusterOperatorStatus, newStatus configv1.ClusterOperatorStatus) string { + messages := []string{} + for _, newCondition := range newStatus.Conditions { + existingStatusCondition := FindStatusCondition(oldStatus.Conditions, newCondition.Type) + if existingStatusCondition == nil { + messages = append(messages, fmt.Sprintf("%s set to %s (%q)", newCondition.Type, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Status != newCondition.Status { + messages = append(messages, fmt.Sprintf("%s changed from %s to %s (%q)", existingStatusCondition.Type, existingStatusCondition.Status, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Message != newCondition.Message { + messages = append(messages, fmt.Sprintf("%s message changed from %q to %q", existingStatusCondition.Type, existingStatusCondition.Message, newCondition.Message)) + } + } + for _, oldCondition := range oldStatus.Conditions { + // This should not happen. It means we removed old condition entirely instead of just changing its status + if c := FindStatusCondition(newStatus.Conditions, oldCondition.Type); c == nil { + messages = append(messages, fmt.Sprintf("%s was removed", oldCondition.Type)) + } + } + + if !equality.Semantic.DeepEqual(oldStatus.RelatedObjects, newStatus.RelatedObjects) { + messages = append(messages, fmt.Sprintf("status.relatedObjects changed from %q to %q", oldStatus.RelatedObjects, newStatus.RelatedObjects)) + } + if !equality.Semantic.DeepEqual(oldStatus.Extension, newStatus.Extension) { + messages = append(messages, fmt.Sprintf("status.extension changed from %q to %q", oldStatus.Extension, newStatus.Extension)) + } + + if len(messages) == 0 { + // ignore errors + originalJSON := &bytes.Buffer{} + json.NewEncoder(originalJSON).Encode(oldStatus) + newJSON := &bytes.Buffer{} + json.NewEncoder(newJSON).Encode(newStatus) + messages = append(messages, diff.StringDiff(originalJSON.String(), newJSON.String())) + } + + return strings.Join(messages, ",") +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `configv1.ConditionTrue` +func IsStatusConditionTrue(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `configv1.ConditionFalse` +func IsStatusConditionFalse(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionFalse) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsStatusConditionPresentAndEqual(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status configv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// IsStatusConditionNotIn returns true when the conditionType does not match the status. 
+func IsStatusConditionNotIn(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status ...configv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + for _, s := range status { + if s == condition.Status { + return false + } + } + return true + } + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go new file mode 100644 index 0000000000..0bd77a7f46 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go @@ -0,0 +1,81 @@ +package configdefaults + +import ( + "time" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +func DefaultString(target *string, defaultVal string) { + if len(*target) == 0 { + *target = defaultVal + } +} + +func DefaultInt(target *int, defaultVal int) { + if *target == 0 { + *target = defaultVal + } +} + +func DefaultMetaDuration(target *time.Duration, defaultVal time.Duration) { + if *target == 0 { + *target = defaultVal + } +} + +func DefaultStringSlice(target *[]string, defaultVal []string) { + if len(*target) == 0 { + *target = defaultVal + } +} + +func SetRecommendedHTTPServingInfoDefaults(config *configv1.HTTPServingInfo) { + if config.MaxRequestsInFlight == 0 { + config.MaxRequestsInFlight = 3000 + } + if config.RequestTimeoutSeconds == 0 { + config.RequestTimeoutSeconds = 60 * 60 // one hour + } + + SetRecommendedServingInfoDefaults(&config.ServingInfo) +} + +func SetRecommendedServingInfoDefaults(config *configv1.ServingInfo) { + DefaultString(&config.BindAddress, "0.0.0.0:8443") + DefaultString(&config.BindNetwork, "tcp4") + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/serving-cert/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/serving-cert/tls.crt") + DefaultString(&config.ClientCA, "/var/run/configmaps/client-ca/ca-bundle.crt") + DefaultString(&config.MinTLSVersion, crypto.TLSVersionToNameOrDie(crypto.DefaultTLSVersion())) + + if len(config.CipherSuites) == 0 { + config.CipherSuites = crypto.CipherSuitesToNamesOrDie(crypto.DefaultCiphers()) + } +} + +func SetRecommendedGenericAPIServerConfigDefaults(config *configv1.GenericAPIServerConfig) { + SetRecommendedHTTPServingInfoDefaults(&config.ServingInfo) + SetRecommendedEtcdConnectionInfoDefaults(&config.StorageConfig.EtcdConnectionInfo) + SetRecommendedKubeClientConfigDefaults(&config.KubeClientConfig) +} + +func SetRecommendedEtcdConnectionInfoDefaults(config *configv1.EtcdConnectionInfo) { + DefaultStringSlice(&config.URLs, []string{"https://etcd.kube-system.svc:2379"}) + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/etcd-client/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/etcd-client/tls.crt") + DefaultString(&config.CA, "/var/run/configmaps/etcd-serving-ca/ca-bundle.crt") +} + +func SetRecommendedKubeClientConfigDefaults(config *configv1.KubeClientConfig) { + // these are historical values + if config.ConnectionOverrides.QPS <= 0 { + config.ConnectionOverrides.QPS = 150.0 + } + if config.ConnectionOverrides.Burst <= 0 { + config.ConnectionOverrides.Burst = 300 + } + DefaultString(&config.ConnectionOverrides.AcceptContentTypes, "application/vnd.kubernetes.protobuf,application/json") + DefaultString(&config.ConnectionOverrides.ContentType, "application/vnd.kubernetes.protobuf") +} diff 
--git a/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go new file mode 100644 index 0000000000..f28ef543f1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go @@ -0,0 +1,71 @@ +package helpers + +import ( + "io/ioutil" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/client" +) + +// TODO this file needs to collapse with pkg/config/client. We cannot safely delegate from this file because this one +// TODO uses JSON and other uses protobuf. + +// GetKubeClientConfig loads in-cluster config if kubeConfigFile is empty or the file if not, then applies overrides. +func GetKubeClientConfig(kubeClientConnection configv1.KubeClientConfig) (*rest.Config, error) { + return GetKubeConfigOrInClusterConfig(kubeClientConnection.KubeConfig, kubeClientConnection.ConnectionOverrides) +} + +// GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, +// then applies overrides. +func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + if len(kubeConfigFile) > 0 { + return GetClientConfig(kubeConfigFile, overrides) + } + + clientConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +func GetClientConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + kubeConfigBytes, err := ioutil.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config. 
+func applyClientConnectionOverrides(overrides configv1.ClientConnectionOverrides, kubeConfig *rest.Config) { + if overrides.QPS != 0 { + kubeConfig.QPS = overrides.QPS + } + if overrides.Burst != 0 { + kubeConfig.Burst = int(overrides.Burst) + } + if len(overrides.AcceptContentTypes) != 0 { + kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes + } + if len(overrides.ContentType) != 0 { + kubeConfig.ContentConfig.ContentType = overrides.ContentType + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go new file mode 100644 index 0000000000..21d4d24f17 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go @@ -0,0 +1,145 @@ +package helpers + +import ( + "strings" + + configv1 "github.com/openshift/api/config/v1" +) + +func GetHTTPServingInfoFileReferences(config *configv1.HTTPServingInfo) []*string { + if config == nil { + return []*string{} + } + + return GetServingInfoFileReferences(&config.ServingInfo) +} + +func GetServingInfoFileReferences(config *configv1.ServingInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.ClientCA) + for i := range config.NamedCertificates { + refs = append(refs, &config.NamedCertificates[i].CertFile) + refs = append(refs, &config.NamedCertificates[i].KeyFile) + } + + return refs +} + +func GetCertFileReferences(config *configv1.CertInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.CertFile) + refs = append(refs, &config.KeyFile) + return refs +} + +func GetRemoteConnectionInfoFileReferences(config *configv1.RemoteConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetEtcdConnectionInfoFileReferences(config *configv1.EtcdConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetStringSourceFileReferences(s *configv1.StringSource) []*string { + if s == nil { + return []*string{} + } + + return []*string{ + &s.File, + &s.KeyFile, + } +} + +func GetAdmissionPluginConfigFileReferences(config *configv1.AdmissionPluginConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.Location) + return refs +} + +func GetAuditConfigFileReferences(config *configv1.AuditConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.PolicyFile) + refs = append(refs, &config.AuditFilePath) + return refs +} + +func GetKubeClientConfigFileReferences(config *configv1.KubeClientConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.KubeConfig) + return refs +} + +func GetGenericAPIServerConfigFileReferences(config *configv1.GenericAPIServerConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetHTTPServingInfoFileReferences(&config.ServingInfo)...) 
+ refs = append(refs, GetEtcdConnectionInfoFileReferences(&config.StorageConfig.EtcdConnectionInfo)...) + refs = append(refs, GetAuditConfigFileReferences(&config.AuditConfig)...) + refs = append(refs, GetKubeClientConfigFileReferences(&config.KubeClientConfig)...) + + // TODO admission config file resolution is currently broken. + //for k := range config.AdmissionPluginConfig { + // refs = append(refs, GetAdmissionPluginConfigReferences(&(config.AdmissionPluginConfig[k]))...) + //} + return refs +} + +func GetFlagsWithFileExtensionsFileReferences(args map[string][]string) []*string { + if args == nil { + return []*string{} + } + + refs := []*string{} + for key, s := range args { + if len(s) == 0 { + continue + } + if !strings.HasSuffix(key, "-file") && !strings.HasSuffix(key, "-dir") { + continue + } + for i := range s { + refs = append(refs, &s[i]) + } + } + + return refs +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go new file mode 100644 index 0000000000..fa7e4b4651 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go @@ -0,0 +1,64 @@ +package helpers + +import ( + "fmt" + "path/filepath" + "strings" +) + +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory. +// Empty and "-" paths are never resolved. +func ResolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths, or "-" + if len(*ref) > 0 && *ref != "-" { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +func makeRelative(path, base string) (string, error) { + if len(path) > 0 && path != "-" { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} + +// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. +// Any path requiring a backstep is left as-is as long it is absolute. Any non-absolute path that can't be relativized produces an error +// Empty and "-" paths are never relativized. 
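A short sketch combining GetServingInfoFileReferences and ResolvePaths from the helpers above (the relative file names and the /var/run/configmaps base directory are assumptions):

package main

import (
	"fmt"
	"log"

	configv1 "github.com/openshift/api/config/v1"

	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	// Relative paths as they might appear in an operator's config file.
	serving := configv1.ServingInfo{
		CertInfo: configv1.CertInfo{CertFile: "serving-cert/tls.crt", KeyFile: "serving-cert/tls.key"},
		ClientCA: "client-ca/ca-bundle.crt",
	}

	// Resolve every file reference against the directory the config was loaded from.
	if err := helpers.ResolvePaths(helpers.GetServingInfoFileReferences(&serving), "/var/run/configmaps"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(serving.CertFile, serving.KeyFile, serving.ClientCA)
}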
+func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths, or "-" + if len(*ref) > 0 && *ref != "-" { + rel, err := makeRelative(*ref, base) + if err != nil { + return err + } + + if rel == "-" { + rel = "./-" + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go new file mode 100644 index 0000000000..f3399e867d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -0,0 +1,102 @@ +package leaderelection + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + + configv1 "github.com/openshift/api/config/v1" +) + +// ToConfigMapLeaderElection returns a leader election config that you just need to fill in the Callback for. Don't forget the callbacks! +func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.LeaderElection, component, identity string) (leaderelection.LeaderElectionConfig, error) { + kubeClient, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return leaderelection.LeaderElectionConfig{}, err + } + + if len(identity) == 0 { + identity = string(uuid.NewUUID()) + } + if len(config.Namespace) == 0 { + return leaderelection.LeaderElectionConfig{}, fmt.Errorf("namespace may not be empty") + } + if len(config.Name) == 0 { + return leaderelection.LeaderElectionConfig{}, fmt.Errorf("name may not be empty") + } + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + eventRecorder := eventBroadcaster.NewRecorder(clientgoscheme.Scheme, corev1.EventSource{Component: component}) + rl, err := resourcelock.New( + resourcelock.ConfigMapsResourceLock, + config.Namespace, + config.Name, + kubeClient.CoreV1(), + resourcelock.ResourceLockConfig{ + Identity: identity, + EventRecorder: eventRecorder, + }) + if err != nil { + return leaderelection.LeaderElectionConfig{}, err + } + + return leaderelection.LeaderElectionConfig{ + Lock: rl, + LeaseDuration: config.LeaseDuration.Duration, + RenewDeadline: config.RenewDeadline.Duration, + RetryPeriod: config.RetryPeriod.Duration, + Callbacks: leaderelection.LeaderCallbacks{ + OnStoppedLeading: func() { + klog.Fatalf("leaderelection lost") + }, + }, + }, nil +} + +// LeaderElectionDefaulting applies what we think are reasonable defaults. It does not mutate the original. +// We do defaulting outside the API so that we can change over time and know whether the user intended to override our values +// as opposed to simply getting the defaulted serialization at some point. 
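A sketch of wiring the two leader election helpers together, assuming client-go's leaderelection.RunOrDie entry point and made-up namespace/name values; error handling is kept minimal:

package main

import (
	"context"
	"log"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"

	libraryleaderelection "github.com/openshift/library-go/pkg/config/leaderelection"
)

func main() {
	restConfig, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}

	// Start from an empty LeaderElection config and let the defaulting helper
	// fill in lease durations plus the (assumed) lock namespace and name.
	leConfig := libraryleaderelection.LeaderElectionDefaulting(configv1.LeaderElection{}, "my-operator-namespace", "my-operator-lock")

	electionConfig, err := libraryleaderelection.ToConfigMapLeaderElection(restConfig, leConfig, "my-operator", "")
	if err != nil {
		log.Fatal(err)
	}
	// The helper leaves OnStartedLeading for the caller to fill in.
	electionConfig.Callbacks.OnStartedLeading = func(ctx context.Context) {
		// start controllers here and block until ctx is cancelled
		<-ctx.Done()
	}
	leaderelection.RunOrDie(context.TODO(), electionConfig)
}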
+func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, defaultName string) configv1.LeaderElection { + ret := *(&config).DeepCopy() + + if ret.LeaseDuration.Duration == 0 { + ret.LeaseDuration.Duration = 60 * time.Second + } + if ret.RenewDeadline.Duration == 0 { + ret.RenewDeadline.Duration = 35 * time.Second + } + if ret.RetryPeriod.Duration == 0 { + ret.RetryPeriod.Duration = 10 * time.Second + } + if len(ret.Namespace) == 0 { + if len(defaultNamespace) > 0 { + ret.Namespace = defaultNamespace + } else { + // Fall back to the namespace associated with the service account token, if available + if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + ret.Namespace = ns + } + } + } + } + if len(ret.Name) == 0 { + ret.Name = defaultName + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/options.go b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go new file mode 100644 index 0000000000..efb446ba4e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go @@ -0,0 +1,51 @@ +package serving + +import ( + "fmt" + "net" + "strconv" + + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + utilflag "k8s.io/apiserver/pkg/util/flag" + + configv1 "github.com/openshift/api/config/v1" +) + +func ToServingOptions(servingInfo configv1.HTTPServingInfo) (*genericapiserveroptions.SecureServingOptionsWithLoopback, error) { + host, portString, err := net.SplitHostPort(servingInfo.BindAddress) + if err != nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", err) + } + port, err := strconv.Atoi(portString) + if err != nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", err) + } + if t := net.ParseIP(host); t == nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", "not an IP") + } + + servingOptions := genericapiserveroptions.NewSecureServingOptions() + servingOptions.BindAddress = net.ParseIP(host) + servingOptions.BindPort = port + servingOptions.BindNetwork = servingInfo.BindNetwork + servingOptions.ServerCert.CertKey.CertFile = servingInfo.CertFile + servingOptions.ServerCert.CertKey.KeyFile = servingInfo.KeyFile + servingOptions.CipherSuites = servingInfo.CipherSuites + servingOptions.MinTLSVersion = servingInfo.MinTLSVersion + + for _, namedCert := range servingInfo.NamedCertificates { + genericNamedCertKey := utilflag.NamedCertKey{ + Names: namedCert.Names, + CertFile: namedCert.CertFile, + KeyFile: namedCert.KeyFile, + } + + servingOptions.SNICertKeys = append(servingOptions.SNICertKeys, genericNamedCertKey) + } + + // TODO sort out what we should do here + //servingOptions.HTTP2MaxStreamsPerConnection = ?? 
+ + servingOptionsWithLoopback := servingOptions.WithLoopback() + return servingOptionsWithLoopback, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go new file mode 100644 index 0000000000..3869d5c2e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go @@ -0,0 +1,78 @@ +package serving + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" + genericapiserver "k8s.io/apiserver/pkg/server" + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + "k8s.io/klog" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" +) + +func ToServerConfig(ctx context.Context, servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization, + kubeConfigFile string) (*genericapiserver.Config, error) { + scheme := runtime.NewScheme() + metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) + config := genericapiserver.NewConfig(serializer.NewCodecFactory(scheme)) + + servingOptions, err := ToServingOptions(servingInfo) + if err != nil { + return nil, err + } + + if err := servingOptions.ApplyTo(&config.SecureServing, &config.LoopbackClientConfig); err != nil { + return nil, err + } + + var lastApplyErr error + + pollCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + if !authenticationConfig.Disabled { + authenticationOptions := genericapiserveroptions.NewDelegatingAuthenticationOptions() + authenticationOptions.RemoteKubeConfigFile = kubeConfigFile + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. + err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authentication (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr + } + } + + if !authorizationConfig.Disabled { + authorizationOptions := genericapiserveroptions.NewDelegatingAuthorizationOptions() + authorizationOptions.RemoteKubeConfigFile = kubeConfigFile + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. 
+ err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authorizationOptions.ApplyTo(&config.Authorization) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authorization (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr + } + } + + return config, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/general.go b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go new file mode 100644 index 0000000000..3a5dcd0b7f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go @@ -0,0 +1,130 @@ +package validation + +import ( + "fmt" + "net" + "net/url" + "os" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type ValidationResults struct { + Warnings field.ErrorList + Errors field.ErrorList +} + +func (r *ValidationResults) Append(additionalResults ValidationResults) { + r.AddErrors(additionalResults.Errors...) + r.AddWarnings(additionalResults.Warnings...) +} + +func (r *ValidationResults) AddErrors(errors ...*field.Error) { + if len(errors) == 0 { + return + } + r.Errors = append(r.Errors, errors...) +} + +func (r *ValidationResults) AddWarnings(warnings ...*field.Error) { + if len(warnings) == 0 { + return + } + r.Warnings = append(r.Warnings, warnings...) +} + +func ValidateHostPort(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(value) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, _, err := net.SplitHostPort(value); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, value, "must be a host:port")) + } + + return allErrs +} + +func ValidateFile(path string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(path) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, err := os.Stat(path); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read file: %v", err))) + } + + return allErrs +} + +func ValidateSecureURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + url, urlErrs := ValidateURL(urlString, fldPath) + if len(urlErrs) == 0 && url.Scheme != "https" { + urlErrs = append(urlErrs, field.Invalid(fldPath, urlString, "must use https scheme")) + } + return url, urlErrs +} + +func ValidateURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + allErrs := field.ErrorList{} + + urlObj, err := url.Parse(urlString) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must be a valid URL")) + return nil, allErrs + } + if len(urlObj.Scheme) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a scheme (e.g. 
https://)")) + } + if len(urlObj.Host) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a host")) + } + return urlObj, allErrs +} + +func ValidateDir(path string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(path) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + fileInfo, err := os.Stat(path) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read info: %v", err))) + } else if !fileInfo.IsDir() { + allErrs = append(allErrs, field.Invalid(fldPath, path, "not a directory")) + } + } + + return allErrs +} + +// HostnameMatchSpecCandidates returns a list of match specs that would match the provided hostname +// Returns nil if len(hostname) == 0 +func HostnameMatchSpecCandidates(hostname string) []string { + if len(hostname) == 0 { + return nil + } + + // Exact match has priority + candidates := []string{hostname} + + // Replace successive labels in the name with wildcards, to require an exact match on number of + // path segments, because certificates cannot wildcard multiple levels of subdomains + // + // This is primarily to be consistent with tls.Config#getCertificate implementation + // + // It using a cert signed for *.foo.example.com and *.bar.example.com by specifying the name *.*.example.com + labels := strings.Split(hostname, ".") + for i := range labels { + labels[i] = "*" + candidates = append(candidates, strings.Join(labels, ".")) + } + return candidates +} + +// HostnameMatches returns true if the given hostname is matched by the given matchSpec +func HostnameMatches(hostname string, matchSpec string) bool { + return sets.NewString(HostnameMatchSpecCandidates(hostname)...).Has(matchSpec) +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go new file mode 100644 index 0000000000..947f5c9148 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go @@ -0,0 +1,174 @@ +package validation + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + utilvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +func ValidateHTTPServingInfo(info configv1.HTTPServingInfo, fldPath *field.Path) ValidationResults { + validationResults := ValidationResults{} + + validationResults.Append(ValidateServingInfo(info.ServingInfo, true, fldPath)) + + if info.MaxRequestsInFlight < 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("maxRequestsInFlight"), info.MaxRequestsInFlight, "must be zero (no limit) or greater")) + } + + if info.RequestTimeoutSeconds < -1 { + validationResults.AddErrors(field.Invalid(fldPath.Child("requestTimeoutSeconds"), info.RequestTimeoutSeconds, "must be -1 (no timeout), 0 (default timeout), or greater")) + } + + return validationResults +} + +func ValidateServingInfo(info configv1.ServingInfo, certificatesRequired bool, fldPath *field.Path) ValidationResults { + validationResults := ValidationResults{} + + validationResults.AddErrors(ValidateHostPort(info.BindAddress, fldPath.Child("bindAddress"))...) + validationResults.AddErrors(ValidateCertInfo(info.CertInfo, certificatesRequired, fldPath)...) 
+ + if len(info.NamedCertificates) > 0 && len(info.CertFile) == 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("namedCertificates"), "", "a default certificate and key is required in certFile/keyFile in order to use namedCertificates")) + } + + validationResults.Append(ValidateNamedCertificates(fldPath.Child("namedCertificates"), info.NamedCertificates)) + + switch info.BindNetwork { + case "tcp", "tcp4", "tcp6": + default: + validationResults.AddErrors(field.Invalid(fldPath.Child("bindNetwork"), info.BindNetwork, "must be 'tcp', 'tcp4', or 'tcp6'")) + } + + if len(info.CertFile) > 0 { + if len(info.ClientCA) > 0 { + validationResults.AddErrors(ValidateFile(info.ClientCA, fldPath.Child("clientCA"))...) + } + } else { + if certificatesRequired && len(info.ClientCA) > 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("clientCA"), info.ClientCA, "cannot specify a clientCA without a certFile")) + } + } + + if _, err := crypto.TLSVersion(info.MinTLSVersion); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("minTLSVersion"), info.MinTLSVersion, crypto.ValidTLSVersions())) + } + for i, cipher := range info.CipherSuites { + if _, err := crypto.CipherSuite(cipher); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("cipherSuites").Index(i), cipher, crypto.ValidCipherSuites())) + } + } + + return validationResults +} + +func ValidateNamedCertificates(fldPath *field.Path, namedCertificates []configv1.NamedCertificate) ValidationResults { + validationResults := ValidationResults{} + + takenNames := sets.NewString() + for i, namedCertificate := range namedCertificates { + idxPath := fldPath.Index(i) + + certDNSNames := []string{} + if len(namedCertificate.CertFile) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("certInfo"), "")) + } else if certInfoErrors := ValidateCertInfo(namedCertificate.CertInfo, false, idxPath); len(certInfoErrors) > 0 { + validationResults.AddErrors(certInfoErrors...) + } else if cert, err := tls.LoadX509KeyPair(namedCertificate.CertFile, namedCertificate.KeyFile); err != nil { + validationResults.AddErrors(field.Invalid(idxPath.Child("certInfo"), namedCertificate.CertInfo, fmt.Sprintf("error loading certificate/key: %v", err))) + } else { + leaf, _ := x509.ParseCertificate(cert.Certificate[0]) + certDNSNames = append(certDNSNames, leaf.Subject.CommonName) + certDNSNames = append(certDNSNames, leaf.DNSNames...) 
+ } + + if len(namedCertificate.Names) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("names"), "")) + } + for j, name := range namedCertificate.Names { + jdxPath := idxPath.Child("names").Index(j) + if len(name) == 0 { + validationResults.AddErrors(field.Required(jdxPath, "")) + continue + } + + if takenNames.Has(name) { + validationResults.AddErrors(field.Invalid(jdxPath, name, "this name is already used in another named certificate")) + continue + } + + // validate names as domain names or *.*.foo.com domain names + validDNSName := true + for _, s := range strings.Split(name, ".") { + if s != "*" && len(utilvalidation.IsDNS1123Label(s)) != 0 { + validDNSName = false + } + } + if !validDNSName { + validationResults.AddErrors(field.Invalid(jdxPath, name, "must be a valid DNS name")) + continue + } + + takenNames.Insert(name) + + // validate certificate has common name or subject alt names that match + if len(certDNSNames) > 0 { + foundMatch := false + for _, dnsName := range certDNSNames { + if HostnameMatches(dnsName, name) { + foundMatch = true + break + } + // if the cert has a wildcard dnsName, and we've configured a non-wildcard name, see if our specified name will match against the dnsName. + if strings.HasPrefix(dnsName, "*.") && !strings.HasPrefix(name, "*.") && HostnameMatches(name, dnsName) { + foundMatch = true + break + } + } + if !foundMatch { + validationResults.AddWarnings(field.Invalid(jdxPath, name, "the specified certificate does not have a CommonName or DNS subjectAltName that matches this name")) + } + } + } + } + + return validationResults +} + +func ValidateCertInfo(certInfo configv1.CertInfo, required bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if required { + if len(certInfo.CertFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "The certificate file must be provided")) + } + if len(certInfo.KeyFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "The certificate key must be provided")) + } + } + + if (len(certInfo.CertFile) == 0) != (len(certInfo.KeyFile) == 0) { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + } + + if len(certInfo.CertFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.CertFile, fldPath.Child("certFile"))...) + } + + if len(certInfo.KeyFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.KeyFile, fldPath.Child("keyFile"))...) + } + + // validate certfile/keyfile load/parse? 
+ + return allErrs +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go new file mode 100644 index 0000000000..01ed53da63 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -0,0 +1,273 @@ +package controllercmd + +import ( + "context" + "fmt" + "io/ioutil" + "sync" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/config/configdefaults" + leaderelectionconverter "github.com/openshift/library-go/pkg/config/leaderelection" + "github.com/openshift/library-go/pkg/config/serving" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +// StartFunc is the function to call on leader election start +type StartFunc func(*ControllerContext) error + +type ControllerContext struct { + ComponentConfig *unstructured.Unstructured + + // KubeConfig provides the REST config with no content type (it will default to JSON). + // Use this config for CR resources. + KubeConfig *rest.Config + + // ProtoKubeConfig provides the REST config with "application/vnd.kubernetes.protobuf,application/json" content type. + // Note that this config might not be safe for CR resources, instead it should be used for other resources. + ProtoKubeConfig *rest.Config + + // EventRecorder is used to record events in controllers. + EventRecorder events.Recorder + + // Server is the GenericAPIServer serving healthz checks and debug info + Server *genericapiserver.GenericAPIServer + + stopChan <-chan struct{} +} + +// Done returns a channel which will close on termination. +func (c ControllerContext) Done() <-chan struct{} { + return c.stopChan +} + +// defaultObserverInterval specifies the default interval that file observer will do rehash the files it watches and react to any changes +// in those files. +var defaultObserverInterval = 5 * time.Second + +// ControllerBuilder allows the construction of an controller in optional pieces. +type ControllerBuilder struct { + kubeAPIServerConfigFile *string + clientOverrides *client.ClientConnectionOverrides + leaderElection *configv1.LeaderElection + fileObserver fileobserver.Observer + fileObserverReactorFn func(file string, action fileobserver.ActionType) error + + startFunc StartFunc + componentName string + componentNamespace string + instanceIdentity string + observerInterval time.Duration + + servingInfo *configv1.HTTPServingInfo + authenticationConfig *operatorv1alpha1.DelegatedAuthentication + authorizationConfig *operatorv1alpha1.DelegatedAuthorization + healthChecks []healthz.HealthzChecker +} + +// NewController returns a builder struct for constructing the command you want to run +func NewController(componentName string, startFunc StartFunc) *ControllerBuilder { + return &ControllerBuilder{ + startFunc: startFunc, + componentName: componentName, + observerInterval: defaultObserverInterval, + } +} + +// WithRestartOnChange will enable a file observer controller loop that observes changes into specified files. 
If a change to a file is detected, +// the specified channel will be closed (allowing to graceful shutdown for other channels). +func (b *ControllerBuilder) WithRestartOnChange(stopCh chan<- struct{}, startingFileContent map[string][]byte, files ...string) *ControllerBuilder { + if len(files) == 0 { + return b + } + if b.fileObserver == nil { + observer, err := fileobserver.NewObserver(b.observerInterval) + if err != nil { + panic(err) + } + b.fileObserver = observer + } + var once sync.Once + + b.fileObserverReactorFn = func(filename string, action fileobserver.ActionType) error { + once.Do(func() { + klog.Warning(fmt.Sprintf("Restart triggered because of %s", action.String(filename))) + close(stopCh) + }) + return nil + } + + b.fileObserver.AddReactor(b.fileObserverReactorFn, startingFileContent, files...) + return b +} + +func (b *ControllerBuilder) WithComponentNamespace(ns string) *ControllerBuilder { + b.componentNamespace = ns + return b +} + +// WithLeaderElection adds leader election options +func (b *ControllerBuilder) WithLeaderElection(leaderElection configv1.LeaderElection, defaultNamespace, defaultName string) *ControllerBuilder { + if leaderElection.Disable { + return b + } + + defaulted := leaderelectionconverter.LeaderElectionDefaulting(leaderElection, defaultNamespace, defaultName) + b.leaderElection = &defaulted + return b +} + +// WithServer adds a server that provides metrics and healthz +func (b *ControllerBuilder) WithServer(servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization) *ControllerBuilder { + b.servingInfo = servingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(b.servingInfo) + b.authenticationConfig = &authenticationConfig + b.authorizationConfig = &authorizationConfig + return b +} + +// WithHealthChecks adds a list of healthchecks to the server +func (b *ControllerBuilder) WithHealthChecks(healthChecks ...healthz.HealthzChecker) *ControllerBuilder { + b.healthChecks = append(b.healthChecks, healthChecks...) + return b +} + +// WithKubeConfigFile sets an optional kubeconfig file. inclusterconfig will be used if filename is empty +func (b *ControllerBuilder) WithKubeConfigFile(kubeConfigFilename string, defaults *client.ClientConnectionOverrides) *ControllerBuilder { + b.kubeAPIServerConfigFile = &kubeConfigFilename + b.clientOverrides = defaults + return b +} + +// WithInstanceIdentity sets the instance identity to use if you need something special. The default is just a UID which is +// usually fine for a pod. +func (b *ControllerBuilder) WithInstanceIdentity(identity string) *ControllerBuilder { + b.instanceIdentity = identity + return b +} + +// Run starts your controller for you. 
It uses leader election if you asked, otherwise it directly calls you +func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.Context) error { + clientConfig, err := b.getClientConfig() + if err != nil { + return err + } + + if b.fileObserver != nil { + go b.fileObserver.Run(ctx.Done()) + } + + kubeClient := kubernetes.NewForConfigOrDie(clientConfig) + namespace, err := b.getComponentNamespace() + if err != nil { + klog.Warningf("unable to identify the current namespace for events: %v", err) + } + controllerRef, err := events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + eventRecorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(namespace), b.componentName, controllerRef) + + // if there is file observer defined for this command, add event into default reaction function. + if b.fileObserverReactorFn != nil { + originalFileObserverReactorFn := b.fileObserverReactorFn + b.fileObserverReactorFn = func(file string, action fileobserver.ActionType) error { + eventRecorder.Warningf("OperatorRestart", "Restarted because of %s", action.String(file)) + return originalFileObserverReactorFn(file, action) + } + } + + if b.servingInfo == nil { + return fmt.Errorf("server config required for health checks and debugging endpoints") + } + + kubeConfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeConfig = *b.kubeAPIServerConfigFile + } + serverConfig, err := serving.ToServerConfig(ctx, *b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) + if err != nil { + return err + } + serverConfig.HealthzChecks = append(serverConfig.HealthzChecks, b.healthChecks...) + + server, err := serverConfig.Complete(nil).New(b.componentName, genericapiserver.NewEmptyDelegate()) + if err != nil { + return err + } + + go func() { + if err := server.PrepareRun().Run(ctx.Done()); err != nil { + klog.Error(err) + } + klog.Fatal("server exited") + }() + + protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + controllerContext := &ControllerContext{ + ComponentConfig: config, + KubeConfig: clientConfig, + ProtoKubeConfig: protoConfig, + EventRecorder: eventRecorder, + Server: server, + stopChan: ctx.Done(), + } + + if b.leaderElection == nil { + if err := b.startFunc(controllerContext); err != nil { + return err + } + return fmt.Errorf("exited") + } + + leaderElection, err := leaderelectionconverter.ToConfigMapLeaderElection(clientConfig, *b.leaderElection, b.componentName, b.instanceIdentity) + if err != nil { + return err + } + + leaderElection.Callbacks.OnStartedLeading = func(ctx context.Context) { + controllerContext.stopChan = ctx.Done() + if err := b.startFunc(controllerContext); err != nil { + klog.Fatal(err) + } + } + leaderelection.RunOrDie(ctx, leaderElection) + return fmt.Errorf("exited") +} + +func (b *ControllerBuilder) getComponentNamespace() (string, error) { + if len(b.componentNamespace) > 0 { + return b.componentNamespace, nil + } + nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "openshift-config-managed", err + } + return string(nsBytes), nil +} + +func (b *ControllerBuilder) getClientConfig() (*rest.Config, error) { + kubeconfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeconfig = 
*b.kubeAPIServerConfigFile + } + + return client.GetKubeConfigOrInClusterConfig(kubeconfig, b.clientOverrides) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go new file mode 100644 index 0000000000..099b42a87c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -0,0 +1,217 @@ +package controllercmd + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/util/logs" + + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/library-go/pkg/config/configdefaults" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/serviceability" + + // for metrics + _ "github.com/openshift/library-go/pkg/controller/metrics" +) + +// ControllerCommandConfig holds values required to construct a command to run. +type ControllerCommandConfig struct { + componentName string + startFunc StartFunc + version version.Info + + basicFlags *ControllerFlags +} + +// NewControllerConfig returns a new ControllerCommandConfig which can be used to wire up all the boiler plate of a controller +// TODO add more methods around wiring health checks and the like +func NewControllerCommandConfig(componentName string, version version.Info, startFunc StartFunc) *ControllerCommandConfig { + return &ControllerCommandConfig{ + startFunc: startFunc, + componentName: componentName, + version: version, + + basicFlags: NewControllerFlags(), + } +} + +// NewCommand returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +func (c *ControllerCommandConfig) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Run: func(cmd *cobra.Command, args []string) { + // boiler plate for the "normal" command + rand.Seed(time.Now().UTC().UnixNano()) + logs.InitLogs() + defer logs.FlushLogs() + defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), c.version)() + defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() + serviceability.StartProfiler() + + if err := c.basicFlags.Validate(); err != nil { + klog.Fatal(err) + } + + if err := c.StartController(context.Background()); err != nil { + klog.Fatal(err) + } + }, + } + + c.basicFlags.AddFlags(cmd) + + return cmd +} + +// Config returns the configuration of this command. Use StartController if you don't need to customize the default operator. +// This method does not modify the receiver. 
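+// It returns the raw unstructured config, the same config decoded into a
+// GenericOperatorConfig, and the original bytes of the config file (all of
+// which are nil/empty when no --config file was given).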
+func (c *ControllerCommandConfig) Config() (*unstructured.Unstructured, *operatorv1alpha1.GenericOperatorConfig, []byte, error) { + configContent, unstructuredConfig, err := c.basicFlags.ToConfigObj() + if err != nil { + return nil, nil, nil, err + } + config := &operatorv1alpha1.GenericOperatorConfig{} + if unstructuredConfig != nil { + // make a copy we can mutate + configCopy := unstructuredConfig.DeepCopy() + // force the config to our version to read it + configCopy.SetGroupVersionKind(operatorv1alpha1.GroupVersion.WithKind("GenericOperatorConfig")) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configCopy.Object, config); err != nil { + return nil, nil, nil, err + } + } + return unstructuredConfig, config, configContent, nil +} + +func hasServiceServingCerts(certDir string) bool { + if _, err := os.Stat(filepath.Join(certDir, "tls.crt")); os.IsNotExist(err) { + return false + } + if _, err := os.Stat(filepath.Join(certDir, "tls.key")); os.IsNotExist(err) { + return false + } + return true +} + +// AddDefaultRotationToConfig starts the provided builder with the default rotation set (config + serving info). Use StartController if +// you do not need to customize the controller builder. This method modifies config with self-signed default cert locations if +// necessary. +func (c *ControllerCommandConfig) AddDefaultRotationToConfig(config *operatorv1alpha1.GenericOperatorConfig, configContent []byte) (map[string][]byte, []string, error) { + certDir := "/var/run/secrets/serving-cert" + + observedFiles := []string{ + c.basicFlags.ConfigFile, + // We observe these, so we they are created or modified by service serving cert signer, we can react and restart the process + // that will pick these up instead of generating the self-signed certs. + // NOTE: We are not observing the temporary, self-signed certificates. + filepath.Join(certDir, "tls.crt"), + filepath.Join(certDir, "tls.key"), + } + // startingFileContent holds hardcoded starting content. If we generate our own certificates, then we want to specify empty + // content to avoid a starting race. When we consume them, the race is really about as good as we can do since we don't know + // what's actually been read. + startingFileContent := map[string][]byte{ + c.basicFlags.ConfigFile: configContent, + } + + // if we don't have any serving cert/key pairs specified and the defaults are not present, generate a self-signed set + // TODO maybe this should be optional? It's a little difficult to come up with a scenario where this is worse than nothing though. 
+ if len(config.ServingInfo.CertFile) == 0 && len(config.ServingInfo.KeyFile) == 0 { + servingInfoCopy := config.ServingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(servingInfoCopy) + + if hasServiceServingCerts(certDir) { + klog.Infof("Using service-serving-cert provided certificates") + config.ServingInfo.CertFile = filepath.Join(certDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(certDir, "tls.key") + } else { + klog.Warningf("Using insecure, self-signed certificates") + temporaryCertDir, err := ioutil.TempDir("", "serving-cert-") + if err != nil { + return nil, nil, err + } + signerName := fmt.Sprintf("%s-signer@%d", c.componentName, time.Now().Unix()) + ca, err := crypto.MakeSelfSignedCA( + filepath.Join(temporaryCertDir, "serving-signer.crt"), + filepath.Join(temporaryCertDir, "serving-signer.key"), + filepath.Join(temporaryCertDir, "serving-signer.serial"), + signerName, + 0, + ) + if err != nil { + return nil, nil, err + } + certDir = temporaryCertDir + + // force the values to be set to where we are writing the certs + config.ServingInfo.CertFile = filepath.Join(certDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(certDir, "tls.key") + // nothing can trust this, so we don't really care about hostnames + servingCert, err := ca.MakeServerCert(sets.NewString("localhost"), 30) + if err != nil { + return nil, nil, err + } + if err := servingCert.WriteCertConfigFile(config.ServingInfo.CertFile, config.ServingInfo.KeyFile); err != nil { + return nil, nil, err + } + crtContent := &bytes.Buffer{} + keyContent := &bytes.Buffer{} + if err := servingCert.WriteCertConfig(crtContent, keyContent); err != nil { + return nil, nil, err + } + + // If we generate our own certificates, then we want to specify empty content to avoid a starting race. This way, + // if any change comes in, we will properly restart + startingFileContent[filepath.Join(certDir, "tls.crt")] = crtContent.Bytes() + startingFileContent[filepath.Join(certDir, "tls.key")] = keyContent.Bytes() + } + } + return startingFileContent, observedFiles, nil +} + +// StartController runs the controller. This is the recommend entrypoint when you don't need +// to customize the builder. +func (c *ControllerCommandConfig) StartController(ctx context.Context) error { + unstructuredConfig, config, configContent, err := c.Config() + if err != nil { + return err + } + + startingFileContent, observedFiles, err := c.AddDefaultRotationToConfig(config, configContent) + if err != nil { + return err + } + + exitOnChangeReactorCh := make(chan struct{}) + ctx2, cancel := context.WithCancel(ctx) + go func() { + select { + case <-exitOnChangeReactorCh: + cancel() + case <-ctx.Done(): + cancel() + } + }() + + builder := NewController(c.componentName, c.startFunc). + WithKubeConfigFile(c.basicFlags.KubeConfigFile, nil). + WithLeaderElection(config.LeaderElection, "", c.componentName+"-lock"). + WithServer(config.ServingInfo, config.Authentication, config.Authorization). + WithRestartOnChange(exitOnChangeReactorCh, startingFileContent, observedFiles...) 
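+	// The builder is now fully wired; Run (below) blocks for the lifetime of the
+	// controller and always returns a non-nil error, even on a deliberate exit.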
+ + return builder.Run(unstructuredConfig, ctx2) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go new file mode 100644 index 0000000000..5cdb4190b9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go @@ -0,0 +1,129 @@ +package controllercmd + +import ( + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/spf13/cobra" + + "github.com/openshift/library-go/pkg/config/client" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" +) + +// ControllerFlags provides the "normal" controller flags +type ControllerFlags struct { + // ConfigFile hold the configfile to load + ConfigFile string + // KubeConfigFile points to a kubeconfig file if you don't want to use the in cluster config + KubeConfigFile string +} + +// NewControllerFlags returns flags with default values set +func NewControllerFlags() *ControllerFlags { + return &ControllerFlags{} +} + +// Validate makes sure the required flags are specified and no illegal combinations are found +func (o *ControllerFlags) Validate() error { + // everything is optional currently + return nil +} + +// AddFlags register and binds the default flags +func (f *ControllerFlags) AddFlags(cmd *cobra.Command) { + flags := cmd.Flags() + // This command only supports reading from config + flags.StringVar(&f.ConfigFile, "config", f.ConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("config", "yaml", "yml") + flags.StringVar(&f.KubeConfigFile, "kubeconfig", f.KubeConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("kubeconfig", "kubeconfig") +} + +// ToConfigObj given completed flags, returns a config object for the flag that was specified. +// TODO versions goes away in 1.11 +func (f *ControllerFlags) ToConfigObj() ([]byte, *unstructured.Unstructured, error) { + // no file means empty, not err + if len(f.ConfigFile) == 0 { + return nil, nil, nil + } + + content, err := ioutil.ReadFile(f.ConfigFile) + if err != nil { + return nil, nil, err + } + // empty file means empty, not err + if len(content) == 0 { + return nil, nil, err + } + + data, err := kyaml.ToJSON(content) + if err != nil { + return nil, nil, err + } + uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, data) + if err != nil { + return nil, nil, err + } + + return content, uncastObj.(*unstructured.Unstructured), nil +} + +// ToClientConfig given completed flags, returns a rest.Config. 
overrides are optional +func (f *ControllerFlags) ToClientConfig(overrides *client.ClientConnectionOverrides) (*rest.Config, error) { + return client.GetKubeConfigOrInClusterConfig(f.KubeConfigFile, overrides) +} + +// ReadYAML decodes a runtime.Object from the provided scheme +// TODO versions goes away with more complete scheme in 1.11 +func ReadYAML(data []byte, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) { + data, err := kyaml.ToJSON(data) + if err != nil { + return nil, err + } + configCodecFactory := serializer.NewCodecFactory(configScheme) + obj, err := runtime.Decode(configCodecFactory.UniversalDecoder(versions...), data) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", data, err) + } + return obj, err +} + +// ReadYAMLFile read a file and decodes a runtime.Object from the provided scheme +func ReadYAMLFile(filename string, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + obj, err := ReadYAML(data, configScheme, versions...) + if err != nil { + return nil, fmt.Errorf("could not load config file %q due to an error: %v", filename, err) + } + return obj, err +} + +// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better +// surrounding text description. This should be removed / replaced when that happens. +func captureSurroundingJSONForError(prefix string, data []byte, err error) error { + if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok { + offset := syntaxErr.Offset + begin := offset - 20 + if begin < 0 { + begin = 0 + } + end := offset + 20 + if end > int64(len(data)) { + end = int64(len(data)) + } + return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end])) + } + if err != nil { + return fmt.Errorf("%s%v", prefix, err) + } + return err +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS new file mode 100644 index 0000000000..bf630bd071 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS @@ -0,0 +1,6 @@ +reviewers: + - deads2k + - sttts + - mfojtik +approvers: + - mfojtik diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go new file mode 100644 index 0000000000..781afa5cbe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go @@ -0,0 +1,59 @@ +package fileobserver + +import ( + "fmt" + "os" + "time" + + "k8s.io/klog" +) + +type Observer interface { + Run(stopChan <-chan struct{}) + AddReactor(reaction reactorFn, startingFileContent map[string][]byte, files ...string) Observer +} + +// ActionType define a type of action observed on the file +type ActionType int + +const ( + // FileModified means the file content was modified. + FileModified ActionType = iota + + // FileCreated means the file was just created. + FileCreated + + // FileDeleted means the file was deleted. + FileDeleted +) + +// String returns human readable form of action taken on a file. 
+func (t ActionType) String(filename string) string { + switch t { + case FileCreated: + return fmt.Sprintf("file %s was created", filename) + case FileDeleted: + return fmt.Sprintf("file %s was deleted", filename) + case FileModified: + return fmt.Sprintf("file %s was modified", filename) + } + return "" +} + +// reactorFn define a reaction function called when an observed file is modified. +type reactorFn func(file string, action ActionType) error + +// ExitOnChangeReactor provides reactor function that causes the process to exit when the change is detected. +var ExitOnChangeReactor reactorFn = func(filename string, action ActionType) error { + klog.Infof("exiting because %q changed", filename) + os.Exit(0) + return nil +} + +func NewObserver(interval time.Duration) (Observer, error) { + return &pollingObserver{ + interval: interval, + reactors: map[string][]reactorFn{}, + files: map[string]string{}, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go new file mode 100644 index 0000000000..0f3ca8ec84 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go @@ -0,0 +1,140 @@ +package fileobserver + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" +) + +type pollingObserver struct { + interval time.Duration + reactors map[string][]reactorFn + files map[string]string + + reactorsMutex sync.RWMutex +} + +// AddReactor will add new reactor to this observer. +func (o *pollingObserver) AddReactor(reaction reactorFn, startingFileContent map[string][]byte, files ...string) Observer { + o.reactorsMutex.Lock() + defer o.reactorsMutex.Unlock() + for _, f := range files { + if len(f) == 0 { + panic(fmt.Sprintf("observed file name must not be empty (%#v)", files)) + } + // Do not rehash existing files + if _, exists := o.files[f]; exists { + continue + } + var err error + + if startingContent, ok := startingFileContent[f]; ok { + klog.V(3).Infof("Starting from specified content for file %q", f) + o.files[f], err = calculateHash(bytes.NewBuffer(startingContent)) + if err != nil { + panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) + } + } else { + klog.V(3).Infof("Adding reactor for file %q", f) + o.files[f], err = calculateFileHash(f) + if err != nil { + panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err)) + } + } + o.reactors[f] = append(o.reactors[f], reaction) + } + return o +} + +func (o *pollingObserver) processReactors(stopCh <-chan struct{}) { + err := wait.PollImmediateInfinite(o.interval, func() (bool, error) { + select { + case <-stopCh: + return true, nil + default: + } + o.reactorsMutex.RLock() + defer o.reactorsMutex.RUnlock() + for filename, reactors := range o.reactors { + currentHash, err := calculateFileHash(filename) + if err != nil { + return false, err + } + lastKnownHash := o.files[filename] + + // No file change detected + if lastKnownHash == currentHash { + continue + } + + klog.Infof("Observed change: file:%s (current: %q, lastKnown: %q)", filename, currentHash, lastKnownHash) + o.files[filename] = currentHash + + for i := range reactors { + action := FileModified + switch { + case len(lastKnownHash) == 0: + action = FileCreated + case len(currentHash) == 0: + action = FileDeleted + case len(lastKnownHash) > 0: + 
action = FileModified + } + + if err := reactors[i](filename, action); err != nil { + klog.Errorf("Reactor for %q failed: %v", filename, err) + } + } + } + return false, nil + }) + if err != nil { + klog.Fatalf("file observer failed: %v", err) + } +} + +// Run will start a new observer. +func (o *pollingObserver) Run(stopChan <-chan struct{}) { + klog.Info("Starting file observer") + defer klog.Infof("Shutting down file observer") + o.processReactors(stopChan) +} + +func calculateFileHash(path string) (string, error) { + stat, statErr := os.Stat(path) + if statErr != nil { + if os.IsNotExist(statErr) { + return "", nil + } + return "", statErr + } + if stat.IsDir() { + return "", fmt.Errorf("you can watch only files, %s is a directory", path) + } + + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + defer f.Close() + return calculateHash(f) +} + +func calculateHash(content io.Reader) (string, error) { + hasher := sha256.New() + if _, err := io.Copy(hasher, content); err != nil { + return "", err + } + return hex.EncodeToString(hasher.Sum(nil)), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go new file mode 100644 index 0000000000..d038f598e2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go @@ -0,0 +1,53 @@ +package metrics + +import ( + "net/url" + "time" + + "k8s.io/client-go/tools/metrics" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // requestLatency is a Prometheus Summary metric type partitioned by + // "verb" and "url" labels. It is used for the rest client latency metrics. + requestLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "rest_client_request_latency_seconds", + Help: "Request latency in seconds. Broken down by verb and URL.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + }, + []string{"verb", "url"}, + ) + + requestResult = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "rest_client_requests_total", + Help: "Number of HTTP requests, partitioned by status code, method, and host.", + }, + []string{"code", "method", "host"}, + ) +) + +func init() { + prometheus.MustRegister(requestLatency) + prometheus.MustRegister(requestResult) + metrics.Register(&latencyAdapter{requestLatency}, &resultAdapter{requestResult}) +} + +type latencyAdapter struct { + m *prometheus.HistogramVec +} + +func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { + l.m.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +type resultAdapter struct { + m *prometheus.CounterVec +} + +func (r *resultAdapter) Increment(code, method, host string) { + r.m.WithLabelValues(code, method, host).Inc() +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go new file mode 100644 index 0000000000..e14f854e65 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go @@ -0,0 +1,90 @@ +package metrics + +import ( + "k8s.io/client-go/util/workqueue" + + "github.com/prometheus/client_golang/prometheus" +) + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. 
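+//
+// Illustrative usage (a sketch): a controller binary picks up these workqueue
+// metrics purely through a side-effect import, e.g.
+//
+//   import _ "github.com/openshift/library-go/pkg/controller/metrics"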
+ +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +type prometheusMetricsProvider struct{} + +func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + depth := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: name, + Name: "depth", + Help: "Current depth of workqueue: " + name, + }) + prometheus.Register(depth) + return depth +} + +func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + adds := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: name, + Name: "adds", + Help: "Total number of adds handled by workqueue: " + name, + }) + prometheus.Register(adds) + return adds +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.SummaryMetric { + latency := prometheus.NewSummary(prometheus.SummaryOpts{ + Subsystem: name, + Name: "queue_latency", + Help: "How long an item stays in workqueue" + name + " before being requested.", + }) + prometheus.Register(latency) + return latency +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { + workDuration := prometheus.NewSummary(prometheus.SummaryOpts{ + Subsystem: name, + Name: "work_duration", + Help: "How long processing an item from workqueue" + name + " takes.", + }) + prometheus.Register(workDuration) + return workDuration +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: name, + Name: "unfinished_work_seconds", + Help: "How many seconds of work " + name + " has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }) + prometheus.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: name, + Name: "longest_running_processor_microseconds", + Help: "How many microseconds has the longest running " + + "processor for " + name + " been running.", + }) + prometheus.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + retries := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: name, + Name: "retries", + Help: "Total number of retries handled by workqueue: " + name, + }) + prometheus.Register(retries) + return retries +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go b/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go new file mode 100644 index 0000000000..9c778934aa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go @@ -0,0 +1,60 @@ +package controller + +import ( + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureOwnerRef adds the ownerref if needed. Removes ownerrefs with conflicting UIDs. +// Returns true if the input is mutated. 
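+//
+// Illustrative call site (a sketch; the configMap object is hypothetical):
+//
+//   if EnsureOwnerRef(configMap, ownerRef) {
+//       // ownerReferences changed; persist the object with an update call
+//   }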
+func EnsureOwnerRef(metadata metav1.Object, newOwnerRef metav1.OwnerReference) bool { + foundButNotEqual := false + for _, existingOwnerRef := range metadata.GetOwnerReferences() { + if existingOwnerRef.APIVersion == newOwnerRef.APIVersion && + existingOwnerRef.Kind == newOwnerRef.Kind && + existingOwnerRef.Name == newOwnerRef.Name { + + // if we're completely the same, there's nothing to do + if equality.Semantic.DeepEqual(existingOwnerRef, newOwnerRef) { + return false + } + + foundButNotEqual = true + break + } + } + + // if we weren't found, then we just need to add ourselves + if !foundButNotEqual { + metadata.SetOwnerReferences(append(metadata.GetOwnerReferences(), newOwnerRef)) + return true + } + + // if we need to remove an existing ownerRef, just do the easy thing and build it back from scratch + newOwnerRefs := []metav1.OwnerReference{newOwnerRef} + for i := range metadata.GetOwnerReferences() { + existingOwnerRef := metadata.GetOwnerReferences()[i] + if existingOwnerRef.APIVersion == newOwnerRef.APIVersion && + existingOwnerRef.Kind == newOwnerRef.Kind && + existingOwnerRef.Name == newOwnerRef.Name { + continue + } + newOwnerRefs = append(newOwnerRefs, existingOwnerRef) + } + metadata.SetOwnerReferences(newOwnerRefs) + return true +} + +// HasOwnerRef checks to see if an object has a particular owner. It is not opinionated about +// the bool fields +func HasOwnerRef(metadata metav1.Object, needle metav1.OwnerReference) bool { + for _, existingOwnerRef := range metadata.GetOwnerReferences() { + if existingOwnerRef.APIVersion == needle.APIVersion && + existingOwnerRef.Kind == needle.Kind && + existingOwnerRef.Name == needle.Name && + existingOwnerRef.UID == needle.UID { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 0000000000..7919321f89 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1031 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} +func ValidTLSVersions() []string { + 
validVersions := []string{} + for k := range versions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := 
range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} +func DefaultCiphers() []uint16 { + // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher + // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for + // perfect forward secrecy. Servers may provide additional cipher + // suites for backwards compatibility with HTTP/1.1 clients. + // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A + // (TLS 1.2 Cipher Suite Black List). + return []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 + // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index + // because it comes after ciphers forbidden by the http/2 spec + // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. 
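+// A minimal sketch of a call site, assuming the usual package name "crypto":
+//
+//   cfg := crypto.SecureTLSConfig(&tls.Config{})
+//   // cfg now has MinVersion and CipherSuites populated with the defaults below.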
+func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) + if err != nil { + return nil, nil, err + } + keyBytes, err := encodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("Error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeInDays = 365 * 2 // 2 years + DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. 
+type SerialGenerator interface { + Next(template *x509.Certificate) (int64, error) +} + +// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +type SerialFileGenerator struct { + SerialFile string + + // lock guards access to the Serial field + lock sync.Mutex + Serial int64 +} + +func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) { + // read serial file, it must already exist + serial, err := fileToSerial(serialFile) + if err != nil { + return nil, err + } + + generator := &SerialFileGenerator{ + Serial: serial, + SerialFile: serialFile, + } + + // 0 is unused and 1 is reserved for the CA itself + // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+ + // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative) + if generator.Serial < 1 { + // fake a call to Next so the file stays in sync and Serial is incremented + if _, err := generator.Next(&x509.Certificate{}); err != nil { + return nil, err + } + } + + return generator, nil +} + +// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file + serial, err := fileToSerial(s.SerialFile) + if err != nil { + return 0, err + } + if serial != s.Serial { + return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial) + } + + next := s.Serial + 1 + s.Serial = next + + // Output in hex, padded to multiples of two characters for OpenSSL's sake + serialText := fmt.Sprintf("%X", next) + if len(serialText)%2 == 1 { + serialText = "0" + serialText + } + // always add a newline at the end to have a valid file + serialText += "\n" + + if err := ioutil.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil { + return 0, err + } + return next, nil +} + +func fileToSerial(serialFile string) (int64, error) { + serialData, err := ioutil.ReadFile(serialFile) + if err != nil { + return 0, err + } + + // read the file as a single hex number after stripping any whitespace + serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64) + if err != nil { + return 0, err + } + + if serial < 0 { + return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile) + } + + return serial, nil +} + +// RandomSerialGenerator returns a serial based on time.Now and the subject +type RandomSerialGenerator struct { +} + +func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) { + r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano())) + return r.Int63(), nil +} + +// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error +// if serialFile is empty, a RandomSerialGenerator will be used +func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { + return ca, false, err + } + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays) + return ca, true, err +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func GetCA(certFile, keyFile, serialFile string) (*CA, error) { + caConfig, err := 
GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { + caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes) + if err != nil { + return nil, err + } + + return &CA{ + SerialGenerator: &RandomSerialGenerator{}, + Config: caConfig, + }, nil +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) + + caConfig, err := MakeSelfSignedCAConfig(name, expireDays) + if err != nil { + return nil, err + } + if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := ioutil.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { + var caLifetimeInDays = DefaultCACertificateLifetimeInDays + if expireDays > 0 { + caLifetimeInDays = expireDays + } + + if caLifetimeInDays > DefaultCACertificateLifetimeInDays { + warnAboutCertificateLifeTime(name, DefaultCACertificateLifetimeInDays) + } + + caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour + + return MakeSelfSignedCAConfigForDuration(name, caLifetime) +} + +func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + // Create CA cert + rootcaPublicKey, rootcaPrivateKey, err := NewKeyPair() + if err != nil { + return nil, err + } + rootcaTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now) + rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) + if err != nil { + return nil, err + } + caConfig := &TLSCertificateConfig{ + Certs: []*x509.Certificate{rootcaCert}, + Key: rootcaPrivateKey, + } + return caConfig, nil +} + +func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) { + // Create CA cert + signerPublicKey, signerPrivateKey, err := NewKeyPair() + if err != nil { + return nil, err + } + signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now) + signerCert, err := issuer.signCertificate(signerTemplate, signerPublicKey) + if err != nil { + return nil, err + } + signerConfig := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...), + Key: signerPrivateKey, + } + return signerConfig, nil +} + +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := 
GetServerCert(certFile, keyFile, hostnames) + if err != nil { + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays) + return certConfig, true, err + } + + return certConfig, false, nil +} + +func GetServerCert(certFile, keyFile string, hostnames sets.String) (*TLSCertificateConfig, error) { + server, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + cert := server.Certs[0] + ips, dns := IPAddressesDNSNames(hostnames.List()) + missingIps := ipsNotInSlice(ips, cert.IPAddresses) + missingDns := stringsNotInSlice(dns, cert.DNSNames) + if len(missingIps) == 0 && len(missingDns) == 0 { + klog.V(4).Infof("Found existing server certificate in %s", certFile) + return server, nil + } + + return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps) +} + +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) + + server, err := ca.MakeServerCert(hostnames, expireDays) + if err != nil { + return nil, err + } + if err := server.WriteCertConfigFile(certFile, keyFile); err != nil { + return server, err + } + return server, nil +} + +// CertificateExtensionFunc is passed a certificate that it may extend, or return an error +// if the extension attempt failed. +type CertificateExtensionFunc func(*x509.Certificate) error + +func (ca *CA) MakeServerCert(hostnames sets.String, expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, _ := NewKeyPair() + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), expireDays, time.Now) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) MakeServerCertForDuration(hostnames sets.String, lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, _ := NewKeyPair() + serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), lifetime, time.Now) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays) + return certConfig, true, err // true indicates we wrote the files. 
+ } + + return certConfig, false, nil +} + +func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile) + // ensure parent dirs + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return nil, err + } + + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := newClientCertificateTemplate(userToSubject(u), expireDays, time.Now) + clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := encodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + if err = ioutil.WriteFile(certFile, certData, os.FileMode(0644)); err != nil { + return nil, err + } + if err = ioutil.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil { + return nil, err + } + + return GetTLSCertificateConfig(certFile, keyFile) +} + +func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := newClientCertificateTemplateForDuration(userToSubject(u), lifetime, time.Now) + clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := encodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + return GetTLSCertificateConfigFromBytes(certData, keyData) +} + +type sortedForDER []string + +func (s sortedForDER) Len() int { + return len(s) +} +func (s sortedForDER) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortedForDER) Less(i, j int) bool { + l1 := len(s[i]) + l2 := len(s[j]) + if l1 == l2 { + return s[i] < s[j] + } + return l1 < l2 +} + +func userToSubject(u user.Info) pkix.Name { + // Ok we are going to order groups in a peculiar way here to workaround a + // 2 bugs, 1 in golang (https://github.com/golang/go/issues/24254) which + // incorrectly encodes Multivalued RDNs and another in GNUTLS clients + // which are too picky (https://gitlab.com/gnutls/gnutls/issues/403) + // and try to "correct" this issue when reading client certs. + // + // This workaround should be killed once Golang's pkix module is fixed to + // generate a correct DER encoding. + // + // The workaround relies on the fact that the first octect that differs + // between the encoding of two group RDNs will end up being the encoded + // length which is directly related to the group name's length. So we'll + // sort such that shortest names come first. 
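+	//
+	// Purely illustrative example (not part of the upstream comment): the
+	// groups ["system:masters", "ops", "dev"] come out ordered as
+	// ["dev", "ops", "system:masters"] -- shorter names first, equal
+	// lengths falling back to lexicographic order.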
+ ugroups := u.GetGroups() + groups := make([]string, len(ugroups)) + copy(groups, ugroups) + sort.Sort(sortedForDER(groups)) + + return pkix.Name{ + CommonName: u.GetName(), + SerialNumber: u.GetUID(), + Organization: groups, + } +} + +func (ca *CA) signCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) { + // Increment and persist serial + serial, err := ca.SerialGenerator.Next(template) + if err != nil { + return nil, err + } + template.SerialNumber = big.NewInt(serial) + return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key) +} + +func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return nil, nil, err + } + return &privateKey.PublicKey, privateKey, nil +} + +// Can be used for CA or intermediate signing certs +func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(caLifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = append(ips, ip) + } else { + dns = append(dns, host) + } + } + + // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries + // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766 + for _, ip := range ips { + dns = append(dns, ip.String()) + } + + return ips, dns +} + +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if 
block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("Could not read any certificates") + } + return certs, nil +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newClientCertificateTemplateForDuration(subject, lifetime, currentTime) +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } +} + +func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) { + defaultLifetimeInYears := defaultLifetimeInDays / 365 + fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears) + fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!") +} + +func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) { + derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey) + if err != nil { + return nil, err + } + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, err + } + if len(certs) != 1 { + return nil, errors.New("Expected a single certificate") + } + return certs[0], nil +} + +func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return b.Bytes(), nil +} +func encodeKey(key crypto.PrivateKey) ([]byte, error) { + b := bytes.Buffer{} + switch key := key.(type) { + case *ecdsa.PrivateKey: + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return []byte{}, err + } + if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return b.Bytes(), err + } + case *rsa.PrivateKey: + if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil { + return []byte{}, err + } + default: + return []byte{}, errors.New("Unrecognized key type") + + } + return b.Bytes(), nil +} + +func writeCertificates(f io.Writer, certs ...*x509.Certificate) error { + bytes, err := EncodeCertificates(certs...) 
+ if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} +func writeKeyFile(f io.Writer, key crypto.PrivateKey) error { + bytes, err := encodeKey(key) + if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} + +func stringsNotInSlice(needles []string, haystack []string) []string { + missing := []string{} + for _, needle := range needles { + if !stringInSlice(needle, haystack) { + missing = append(missing, needle) + } + } + return missing +} + +func stringInSlice(needle string, haystack []string) bool { + for _, straw := range haystack { + if needle == straw { + return true + } + } + return false +} + +func ipsNotInSlice(needles []net.IP, haystack []net.IP) []net.IP { + missing := []net.IP{} + for _, needle := range needles { + if !ipInSlice(needle, haystack) { + missing = append(missing, needle) + } + } + return missing +} + +func ipInSlice(needle net.IP, haystack []net.IP) bool { + for _, straw := range haystack { + if needle.Equal(straw) { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go new file mode 100644 index 0000000000..0aa127037c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go @@ -0,0 +1,20 @@ +package crypto + +import ( + "crypto/x509" + "time" +) + +// FilterExpiredCerts checks are all certificates in the bundle valid, i.e. they have not expired. +// The function returns new bundle with only valid certificates or error if no valid certificate is found. +func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate { + currentTime := time.Now() + var validCerts []*x509.Certificate + for _, c := range certs { + if c.NotAfter.After(currentTime) { + validCerts = append(validCerts, c) + } + } + + return validCerts +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt new file mode 100644 index 0000000000..b6140c7abb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICMjCCAdmgAwIBAgIUdTpx2/qycBZJltbEdfTyfKyJjG0wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTIwODAwWhcN +MTgwNzMwMTIwOTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABMlJR5tWK7vgCytCxBQov1xNp+R9RG2wI1w9 +SXIn+Za97Nf6krdyUDd+P6QSSJDkRTQZDsGiCpJhgd5kAzFNUkajgZgwgZUwDgYD +VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw +HQYDVR0OBBYEFOERFpshmIXspqXoox9gnSFGmm3PMB8GA1UdIwQYMBaAFCtdC7xd +NJKjmyiwhZJH7LBLOLrgMCAGA1UdEQQZMBeCFWV0Y2Rwcm94eS10ZXN0cy5sb2Nh +bDAKBggqhkjOPQQDAgNHADBEAiAvsq9L5uk0jg3v2z1xemAUwPXrEIAcbJhXFfC0 +QmVGGgIgFT9d/inKJcm/NfAgDGkoXSvHGv0NKAZpR32Dqriobh4= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt new file mode 100644 index 0000000000..b321982a74 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUQ0hq1Lmd6ujao+8Iy6LfpMdyNI8wCgYIKoZIzj0EAwIw 
+TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMDAwWhcN +MjMwNzI5MTExMDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABGoowUY2eQdvaHG4S/UMYD6mjs6/P7mmhizl +KWO03gq2eVSsbiYAnCJok3o2WQ01GtcS6bOUJ1DOG0gLTRfQ/lWjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQmqCeN+suT +0JjgSxtCqTln7zonHjAfBgNVHSMEGDAWgBQmqCeN+suT0JjgSxtCqTln7zonHjAK +BggqhkjOPQQDAgNIADBFAiAUKV8vkiIoCiqtHQsp3PrUUV3He2B9K1tQgA8loTa+ +IQIhANPbCDVoPSFsX0I5iG/DQl/MmTo/tlsmNkN99j1j2JIM +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUU8ZsD37pcA1UYkgwhR6d/KjdGeAwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABLupsOF50q6GE7z2US77t5iLGe9wdOFwHssC +jUjCEGvJ/d2sGMxdiABJrrB8gau6TilrJCy9ZTYj56fzdReUnsKjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRhaKyklrnI +wd2kg84t1D8CvDVtdjAfBgNVHSMEGDAWgBRhaKyklrnIwd2kg84t1D8CvDVtdjAK +BggqhkjOPQQDAgNIADBFAiAOCYqtOamRapNc+XxR7IFzlr7Si7EvjQ+ej5SKHb7g +rgIhAIBd1dtMc0KJSFsoxnQZailkFi5Nlea2eHU1wEDKVb40 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/zCCAaagAwIBAgIUVCSMefpK8uxDKy87jKnwc97DseIwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABOhGVuxW0nEQ5REqQdRF1eJ7OUOdXB/oDJed +Jr1ezcyhJyCRvD9DfadSBvMHFyzw7ssBIIMm4C3Eufj96M3tSACjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBTLR9qOF3Hh +if8KUbkrRYUK13xSSDAfBgNVHSMEGDAWgBTLR9qOF3Hhif8KUbkrRYUK13xSSDAK +BggqhkjOPQQDAgNHADBEAiAFD2zRXnp40wVeffwpkU+ToFF6Nts/HJk02iMr/+km +RgIgRLZxonlkyLlUHucMKC2V+4UJ9akEbu/bhCXKuQb2DgY= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt new file mode 100644 index 0000000000..862bdbc2df --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUWke4fSfaCH+2MLSFeTHBpoi+h1YwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTA1MDAwWhcN +MjMwNzI5MTA1MDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABHoqBfTXFdWRATfdrr/v5UriZBxmzL5aiwLZ +VRUg2UZNnoH2JLUcDkqx3IQakjoVijweiQeqxAai3mxjtgxbh+ajZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgDE3RpOiH +Gv7AEnYKRk46zVIkbzAfBgNVHSMEGDAWgBSgDE3RpOiHGv7AEnYKRk46zVIkbzAK +BggqhkjOPQQDAgNIADBFAiA3Gg/gwiEfjclpQYyd3qTgdCWzud8GKRdjVK3Z2BXW +swIhANMuxi0Y41mwcmh3a2icpdeGHGyGNdNDe8uF+5csuNUp +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key new file mode 100644 index 0000000000..83cf18be62 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIC+UyR59JEbt/qjWZG/87ZYzk0pOgTBmpx5R0w6uG66JoAoGCCqGSM49 +AwEHoUQDQgAEeioF9NcV1ZEBN92uv+/lSuJkHGbMvlqLAtlVFSDZRk2egfYktRwO +SrHchBqSOhWKPB6JB6rEBqLebGO2DFuH5g== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/openshift/library-go/pkg/git/OWNERS b/vendor/github.com/openshift/library-go/pkg/git/OWNERS new file mode 100644 index 0000000000..e3dd451966 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/OWNERS @@ -0,0 +1,12 @@ +reviewers: + - smarterclayton + - csrwng + - bparees + - gabemontero + - mfojtik + - jim-minter +approvers: + - smarterclayton + - csrwng + - bparees + - mfojtik diff --git a/vendor/github.com/openshift/library-go/pkg/git/doc.go b/vendor/github.com/openshift/library-go/pkg/git/doc.go new file mode 100644 index 0000000000..e1a419ef71 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/doc.go @@ -0,0 +1,2 @@ +// Package git allows working with Git repositories +package git diff --git a/vendor/github.com/openshift/library-go/pkg/git/git.go b/vendor/github.com/openshift/library-go/pkg/git/git.go new file mode 100644 index 0000000000..1ac681b8c5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/git.go @@ -0,0 +1,51 @@ +package git + +import ( + "bufio" + "io" + "net/url" + "path" + "strings" +) + +// NameFromRepositoryURL suggests a name for a repository URL based on the last +// segment of the path, or returns false +func NameFromRepositoryURL(url *url.URL) (string, bool) { + // from path + if len(url.Path) > 0 { + base := path.Base(url.Path) + if len(base) > 0 && base != "/" { + if ext := path.Ext(base); ext == ".git" { + base = base[:len(base)-4] + } + return base, true + } + } + return "", false +} + +type ChangedRef struct { + Ref string + Old string + New string +} + +func ParsePostReceive(r io.Reader) ([]ChangedRef, error) { + refs := []ChangedRef{} + scan := bufio.NewScanner(r) + for scan.Scan() { + segments := strings.Split(scan.Text(), " ") + if len(segments) != 3 { + continue + } + refs = append(refs, ChangedRef{ + Ref: segments[2], + Old: segments[0], + New: segments[1], + }) + } + if err := scan.Err(); err != nil && err != io.EOF { + return nil, err + } + return refs, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/git/repository.go b/vendor/github.com/openshift/library-go/pkg/git/repository.go new file mode 100644 index 0000000000..0f21235906 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/repository.go @@ -0,0 +1,552 @@ +package git + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "time" + + "k8s.io/klog" +) + +// Repository represents a git source repository +type Repository interface { + GetRootDir(dir string) (string, error) + GetOriginURL(dir string) (string, bool, error) + GetRef(dir string) string + Clone(dir string, url string) error + CloneWithOptions(dir string, url string, args ...string) error + CloneBare(dir string, url string) error + CloneMirror(dir string, url string) error + Fetch(dir string, url string, ref string) error + Checkout(dir string, ref string) error + PotentialPRRetryAsFetch(dir string, url string, ref string, err error) error + SubmoduleUpdate(dir string, init, recursive bool) error + Archive(dir, ref, format string, w io.Writer) error + Init(dir string, bare bool) error + Add(dir string, spec string) error + Commit(dir 
string, message string) error + AddRemote(dir string, name, url string) error + AddLocalConfig(dir, name, value string) error + ShowFormat(dir, commit, format string) (string, error) + ListRemote(url string, args ...string) (string, string, error) + TimedListRemote(timeout time.Duration, url string, args ...string) (string, string, error) + GetInfo(location string) (*SourceInfo, []error) +} + +const ( + // defaultCommandTimeout is the default timeout for git commands that we want to enforce timeouts on + defaultCommandTimeout = 30 * time.Second + + // Shallow maps to --depth=1, which clones a Git repository without + // downloading history + Shallow = "--depth=1" + + // noCommandTimeout signals that there should be no timeout for the command when passed as the timeout + // for the default timedExecGitFunc + noCommandTimeout = 0 * time.Second +) + +// ErrGitNotAvailable will be returned if the git call fails because a git binary +// could not be found +var ErrGitNotAvailable = errors.New("git binary not available") + +// SourceInfo stores information about the source code +type SourceInfo struct { + // Ref represents a commit SHA-1, valid Git branch name or a Git tag + // The output image will contain this information as 'io.openshift.build.commit.ref' label. + Ref string + + // CommitID represents an arbitrary extended object reference in Git as SHA-1 + // The output image will contain this information as 'io.openshift.build.commit.id' label. + CommitID string + + // Date contains a date when the committer created the commit. + // The output image will contain this information as 'io.openshift.build.commit.date' label. + Date string + + // AuthorName contains the name of the author + // The output image will contain this information (along with AuthorEmail) as 'io.openshift.build.commit.author' label. + AuthorName string + + // AuthorEmail contains the e-mail of the author + // The output image will contain this information (along with AuthorName) as 'io.openshift.build.commit.author' lablel. + AuthorEmail string + + // CommitterName contains the name of the committer + CommitterName string + + // CommitterEmail contains the e-mail of the committer + CommitterEmail string + + // Message represents the first 80 characters from the commit message. + // The output image will contain this information as 'io.openshift.build.commit.message' label. + Message string + + // Location contains a valid URL to the original repository. + // The output image will contain this information as 'io.openshift.build.source-location' label. + Location string + + // ContextDir contains path inside the Location directory that + // contains the application source code. + // The output image will contain this information as 'io.openshift.build.source-context-dir' + // label. 
+ ContextDir string +} + +// execGitFunc is a function that executes a Git command +type execGitFunc func(dir string, args ...string) (string, string, error) + +// timedExecGitFunc is a function that executes a Git command with a timeout +type timedExecGitFunc func(timeout time.Duration, dir string, args ...string) (string, string, error) + +type repository struct { + git execGitFunc + timedGit timedExecGitFunc + + shallow bool +} + +// NewRepository creates a new Repository +func NewRepository() Repository { + return NewRepositoryWithEnv(nil) +} + +// NewRepositoryForEnv creates a new Repository using the specified environment +func NewRepositoryWithEnv(env []string) Repository { + return &repository{ + git: func(dir string, args ...string) (string, string, error) { + return command("git", dir, env, args...) + }, + timedGit: func(timeout time.Duration, dir string, args ...string) (string, string, error) { + return timedCommand(timeout, "git", dir, env, args...) + }, + } +} + +// NewRepositoryForBinary returns a Repository using the specified +// git executable. +func NewRepositoryForBinary(gitBinaryPath string) Repository { + return NewRepositoryForBinaryWithEnvironment(gitBinaryPath, nil) +} + +// NewRepositoryForBinary returns a Repository using the specified +// git executable and environment +func NewRepositoryForBinaryWithEnvironment(gitBinaryPath string, env []string) Repository { + return &repository{ + git: func(dir string, args ...string) (string, string, error) { + return command(gitBinaryPath, dir, env, args...) + }, + timedGit: func(timeout time.Duration, dir string, args ...string) (string, string, error) { + return timedCommand(timeout, gitBinaryPath, dir, env, args...) + }, + } +} + +// IsRoot returns true if location is the root of a bare git repository +func IsBareRoot(path string) (bool, error) { + _, err := os.Stat(filepath.Join(path, "HEAD")) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// PotentialPRRetryAsFetch is used on checkout errors after a clone where the possibility +// that a fetch or a PR ref is needed between the clone and checkout operations +// Cases include: 1) GitHub PRs (example ref forms: (refs/)?pull/[1-9][0-9]*/head); +// 2) Refs which the RHEL7 git version appears to be too old to handle correctly +// (example ref form: foo-bar-1), but which newer git versions seem to manage OK. 
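+//
+// A typical call site, sketched here purely for illustration, retries a failed
+// checkout by fetching the ref and then checking out FETCH_HEAD:
+//
+//	if err := repo.Checkout(dir, ref); err != nil {
+//		err = repo.PotentialPRRetryAsFetch(dir, url, ref, err)
+//	}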
+func (r *repository) PotentialPRRetryAsFetch(dir, remote, ref string, err error) error { + klog.V(4).Infof("Checkout after clone failed for ref %s with error: %v, attempting fetch", ref, err) + err = r.Fetch(dir, remote, ref) + if err != nil { + return err + } + + err = r.Checkout(dir, "FETCH_HEAD") + if err != nil { + return err + } + klog.V(4).Infof("Fetch / checkout for %s successful", ref) + return nil +} + +// GetRootDir obtains the directory root for a Git repository +func (r *repository) GetRootDir(location string) (string, error) { + dir, _, err := r.git(location, "rev-parse", "--git-dir") + if err != nil { + return "", err + } + if dir == "" { + return "", fmt.Errorf("%s is not a git repository", dir) + } + if strings.HasSuffix(dir, ".git") { + dir = dir[:len(dir)-4] + if strings.HasSuffix(dir, "/") { + dir = dir[:len(dir)-1] + } + } + if len(dir) == 0 { + dir = location + } + return dir, nil +} + +var ( + remoteURLExtract = regexp.MustCompile("^remote\\.(.*)\\.url (.*?)$") + remoteOriginNames = []string{"origin", "upstream", "github", "openshift", "heroku"} +) + +// GetOriginURL returns the origin branch URL for the git repository +func (r *repository) GetOriginURL(location string) (string, bool, error) { + text, _, err := r.git(location, "config", "--get-regexp", "^remote\\..*\\.url$") + if err != nil { + if IsExitCode(err, 1) { + return "", false, nil + } + return "", false, err + } + + remotes := make(map[string]string) + s := bufio.NewScanner(bytes.NewBufferString(text)) + for s.Scan() { + if matches := remoteURLExtract.FindStringSubmatch(s.Text()); matches != nil { + remotes[matches[1]] = matches[2] + } + } + if err := s.Err(); err != nil { + return "", false, err + } + for _, remote := range remoteOriginNames { + if url, ok := remotes[remote]; ok { + return url, true, nil + } + } + + return "", false, nil +} + +// GetRef retrieves the current branch reference for the git repository +func (r *repository) GetRef(location string) string { + branch, _, err := r.git(location, "symbolic-ref", "-q", "--short", "HEAD") + if err != nil { + branch = "" + } + return branch +} + +// AddRemote adds a new remote to the repository. +func (r *repository) AddRemote(location, name, url string) error { + _, _, err := r.git(location, "remote", "add", name, url) + return err +} + +// AddLocalConfig adds a value to the current repository +func (r *repository) AddLocalConfig(location, name, value string) error { + _, _, err := r.git(location, "config", "--local", "--add", name, value) + return err +} + +// CloneWithOptions clones a remote git repository to a local directory +func (r *repository) CloneWithOptions(location string, url string, args ...string) error { + gitArgs := []string{"clone"} + gitArgs = append(gitArgs, args...) + gitArgs = append(gitArgs, url) + gitArgs = append(gitArgs, location) + + // We need to check to see if we're importing reference information, for + // for error checking later on + for _, opt := range gitArgs { + if opt == Shallow { + r.shallow = true + break + } + } + + _, _, err := r.git("", gitArgs...) 
+ return err +} + +// Clone clones a remote git repository to a local directory +func (r *repository) Clone(location string, url string) error { + return r.CloneWithOptions(location, url, "--recursive") +} + +// CloneMirror clones a remote git repository to a local directory as a mirror +func (r *repository) CloneMirror(location string, url string) error { + return r.CloneWithOptions(location, url, "--mirror") +} + +// CloneBare clones a remote git repository to a local directory +func (r *repository) CloneBare(location string, url string) error { + return r.CloneWithOptions(location, url, "--bare") +} + +// ListRemote lists references in a remote repository +// ListRemote will time out with a default timeout of 10s. If a different timeout is +// required, TimedListRemote should be used instead +func (r *repository) ListRemote(url string, args ...string) (string, string, error) { + return r.TimedListRemote(defaultCommandTimeout, url, args...) +} + +// TimedListRemote lists references in a remote repository, or fails if the list does +// not complete before the given timeout +func (r *repository) TimedListRemote(timeout time.Duration, url string, args ...string) (string, string, error) { + gitArgs := []string{"ls-remote"} + gitArgs = append(gitArgs, args...) + gitArgs = append(gitArgs, url) + // `git ls-remote` does not allow for any timeout to be set, and defaults to a timeout + // of five minutes, so we enforce a timeout here to allow it to fail eariler than that + return r.timedGit(timeout, "", gitArgs...) +} + +// Fetch updates the provided git repository +func (r *repository) Fetch(location, uri, ref string) error { + _, _, err := r.git(location, "fetch", uri, ref) + return err +} + +// Archive creates a archive of the Git repo at directory location at commit ref and with the given Git format, +// and then writes that to the provided io.Writer +func (r *repository) Archive(location, ref, format string, w io.Writer) error { + stdout, _, err := r.git(location, "archive", fmt.Sprintf("--format=%s", format), ref) + w.Write([]byte(stdout)) + return err +} + +// Checkout switches to the given ref for the git repository +func (r *repository) Checkout(location string, ref string) error { + if r.shallow { + return errors.New("cannot checkout ref on shallow clone") + } + _, _, err := r.git(location, "checkout", ref, "--") + return err +} + +// SubmoduleUpdate updates submodules, optionally recursively +func (r *repository) SubmoduleUpdate(location string, init, recursive bool) error { + updateArgs := []string{"submodule", "update"} + if init { + updateArgs = append(updateArgs, "--init") + } + if recursive { + updateArgs = append(updateArgs, "--recursive") + } + + _, _, err := r.git(location, updateArgs...) + return err +} + +// ShowFormat formats the ref with the given git show format string +func (r *repository) ShowFormat(location, ref, format string) (string, error) { + out, _, err := r.git(location, "show", "-s", ref, fmt.Sprintf("--format=%s", format)) + return out, err +} + +// Init initializes a new git repository in the provided location +func (r *repository) Init(location string, bare bool) error { + args := []string{"init"} + if bare { + args = append(args, "--bare") + } + args = append(args, location) + _, _, err := r.git("", args...) 
+ return err +} + +func (r *repository) Add(location, spec string) error { + _, _, err := r.git(location, "add", spec) + return err +} + +func (r *repository) Commit(location, message string) error { + _, _, err := r.git(location, "commit", "-m", message) + return err +} + +// GetInfo retrieves the informations about the source code and commit +func (r *repository) GetInfo(location string) (*SourceInfo, []error) { + errors := []error{} + git := func(arg ...string) string { + stdout, stderr, err := r.git(location, arg...) + if err != nil { + errors = append(errors, fmt.Errorf("error invoking 'git %s': %v. Out: %s, Err: %s", + strings.Join(arg, " "), err, stdout, stderr)) + } + return strings.TrimSpace(stdout) + } + info := &SourceInfo{} + info.Ref = git("rev-parse", "--abbrev-ref", "HEAD") + info.CommitID = git("rev-parse", "--verify", "HEAD") + info.AuthorName = git("--no-pager", "show", "-s", "--format=%an", "HEAD") + info.AuthorEmail = git("--no-pager", "show", "-s", "--format=%ae", "HEAD") + info.CommitterName = git("--no-pager", "show", "-s", "--format=%cn", "HEAD") + info.CommitterEmail = git("--no-pager", "show", "-s", "--format=%ce", "HEAD") + info.Date = git("--no-pager", "show", "-s", "--format=%ad", "HEAD") + info.Message = git("--no-pager", "show", "-s", "--format=%<(80,trunc)%s", "HEAD") + + // it is not required for a Git repository to have a remote "origin" defined + if out, _, err := r.git(location, "config", "--get", "remote.origin.url"); err == nil { + info.Location = out + } + + return info, errors +} + +// command executes an external command in the given directory. +// The command's standard out and error are trimmed and returned as strings +// It may return the type *GitError if the command itself fails. +func command(name, dir string, env []string, args ...string) (stdout, stderr string, err error) { + return timedCommand(noCommandTimeout, name, dir, env, args...) +} + +// timedCommand executes an external command in the given directory with a timeout. +// The command's standard out and error are returned as strings. +// It may return the type *GitError if the command itself fails or the type *TimeoutError +// if the command times out before finishing. +// If the git binary cannot be found, ErrGitNotAvailable will be returned as the error. +func timedCommand(timeout time.Duration, name, dir string, env []string, args ...string) (stdout, stderr string, err error) { + var stdoutBuffer, stderrBuffer bytes.Buffer + + klog.V(4).Infof("Executing %s %s", name, strings.Join(args, " ")) + + cmd := exec.Command(name, args...) 
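+	// Run the command from the requested directory with the caller-supplied
+	// environment, capturing stdout and stderr into the buffers above.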
+ cmd.Dir = dir + cmd.Env = env + cmd.Stdout = &stdoutBuffer + cmd.Stderr = &stderrBuffer + + if env != nil { + klog.V(8).Infof("Environment:\n") + for _, e := range env { + klog.V(8).Infof("- %s", e) + } + } + + err, timedOut := runCommand(cmd, timeout) + if timedOut { + return "", "", &TimeoutError{ + Err: fmt.Errorf("execution of %s %s timed out after %s", name, strings.Join(args, " "), timeout), + } + } + + // we don't want captured output to have a trailing newline for formatting reasons + stdout, stderr = strings.TrimRight(stdoutBuffer.String(), "\n"), strings.TrimRight(stderrBuffer.String(), "\n") + + // check whether git was available in the first place + if err != nil { + if !isBinaryInstalled(name) { + return "", "", ErrGitNotAvailable + } + } + + // if we encounter an error we recognize, return a typed error + if exitErr, ok := err.(*exec.ExitError); ok { + return stdout, stderr, &GitError{ + Err: exitErr, + Stdout: stdout, + Stderr: stderr, + } + } + + // if we didn't encounter an ExitError or a timeout, simply return the error + return stdout, stderr, err +} + +// runCommand runs the command with the given timeout, and returns any errors encountered and whether +// the command timed out or not +func runCommand(cmd *exec.Cmd, timeout time.Duration) (error, bool) { + out := make(chan error) + go func() { + if err := cmd.Start(); err != nil { + klog.V(4).Infof("Error starting execution: %v", err) + } + out <- cmd.Wait() + }() + + if timeout == noCommandTimeout { + select { + case err := <-out: + if err != nil { + klog.V(4).Infof("Error executing command: %v", err) + } + return err, false + } + } else { + select { + case err := <-out: + if err != nil { + klog.V(4).Infof("Error executing command: %v", err) + } + return err, false + case <-time.After(timeout): + klog.V(4).Infof("Command execution timed out after %s", timeout) + return nil, true + } + } +} + +// TimeoutError is returned when the underlying Git coommand times out before finishing +type TimeoutError struct { + Err error +} + +func (e *TimeoutError) Error() string { + return e.Err.Error() +} + +// GitError is returned when the underlying Git command returns a non-zero exit code. 
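+// Error() prefers the captured stderr when it is non-empty so that callers see
+// git's own message; IsExitCode (below) can be used to branch on a specific
+// exit status, as GetOriginURL above does for exit code 1.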
+type GitError struct { + Err error + Stdout string + Stderr string +} + +func (e *GitError) Error() string { + if len(e.Stderr) > 0 { + return e.Stderr + } + return e.Err.Error() +} + +func IsExitCode(err error, exitCode int) bool { + switch t := err.(type) { + case *GitError: + return IsExitCode(t.Err, exitCode) + case *exec.ExitError: + if ws, ok := t.Sys().(syscall.WaitStatus); ok { + return ws.ExitStatus() == exitCode + } + return false + } + return false +} + +func gitBinary() string { + if runtime.GOOS == "windows" { + return "git.exe" + } + return "git" +} + +func IsGitInstalled() bool { + return isBinaryInstalled(gitBinary()) +} + +func isBinaryInstalled(name string) bool { + _, err := exec.LookPath(name) + return err == nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go new file mode 100644 index 0000000000..a740c2d9a0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go @@ -0,0 +1,138 @@ +package digest + +import ( + "fmt" + "hash" + "io" + "regexp" + "strings" +) + +const ( + // DigestSha256EmptyTar is the canonical sha256 digest of empty data + DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) +} + +// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. +func NewDigestFromHex(alg, hex string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, hex)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// ParseDigest parses s and returns the validated digest object. An error will +// be returned if the format is invalid. 
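+//
+// For example (illustrative), parsing the well-known digest of empty data
+// (DigestSha256EmptyTar above):
+//
+//	d, err := ParseDigest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
+//	// on success, d.Algorithm() == SHA256 and d.Hex() holds the hex portion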
+func ParseDigest(s string) (Digest, error) { + d := Digest(s) + + return d, d.Validate() +} + +// FromReader returns the most valid digest for the underlying content using +// the canonical digest algorithm. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + + i := strings.Index(s, ":") + if i < 0 { + return ErrDigestInvalidFormat + } + + // case: "sha256:" with no hex. + if i+1 == len(s) { + return ErrDigestInvalidFormat + } + + switch algorithm := Algorithm(s[:i]); algorithm { + case SHA256, SHA384, SHA512: + if algorithm.Size()*2 != len(s[i+1:]) { + return ErrDigestInvalidLength + } + default: + return ErrDigestUnsupported + } + + return nil +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Hex returns the hex digest portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Hex() string { + return string(d[d.sepIndex()+1:]) +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic("could not find ':' in digest: " + d) + } + + return i +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go new file mode 100644 index 0000000000..f3105a45b6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go @@ -0,0 +1,155 @@ +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, New and Hash will return nil. +func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. 
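+// For SHA256 this is 32, which is why Digest.Validate in digest.go requires
+// exactly 64 hex characters (Size()*2) after the "sha256:" prefix.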
+func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + return nil +} + +// New returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling New. +func (a Algorithm) New() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. +func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.New() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// TODO(stevvooe): Allow resolution of verifiers using the digest type and +// this registration system. + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go new file mode 100644 index 0000000000..6e6e4347ea --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go @@ -0,0 +1,5 @@ +// digest is a copy from "github.com/docker/distribution/digest" that is kept because we want to avoid the godep, +// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks. +// Don't try this at home! 
+// Changes here require sign-off from openshift/api-reviewers and they will be rejected. +package digest diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go new file mode 100644 index 0000000000..22188ea98f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go @@ -0,0 +1,5 @@ +// reference is a copy from "github.com/docker/distribution/reference" that is kept because we want to avoid the godep, +// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks. +// Don't try this at home! +// Changes here require sign-off from openshift/api-reviewers and they will be rejected. +package reference diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go new file mode 100644 index 0000000000..eb498bc9d9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go @@ -0,0 +1,370 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +package reference + +import ( + "errors" + "fmt" + "path" + "strings" + + "github.com/openshift/library-go/pkg/image/internal/digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. 
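+//
+// Illustrative examples of references matching the grammar above:
+//
+//	registry.example.com:5000/app/image:v1.0
+//	app/image@sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc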
+type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +func SplitHostname(named Named) (string, string) { + name := named.Name() + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. 
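+//
+// For example (illustrative):
+//
+//	named, err := ParseNamed("registry.example.com/app/image:v1")
+//	// on success, named.Name() == "registry.example.com/app/image"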
+func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. 
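References can also be assembled piece by piece instead of parsed from a string. A small sketch, with the same internal-import caveat as above, an invented repository name, and a placeholder digest value:

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/library-go/pkg/image/internal/digest"
	"github.com/openshift/library-go/pkg/image/internal/reference"
)

func main() {
	// WithName validates the bare repository name against anchoredNameRegexp.
	named, err := reference.WithName("registry.example.com:5000/team/app")
	if err != nil {
		panic(err)
	}

	// Attach a tag; the result satisfies NamedTagged.
	tagged, err := reference.WithTag(named, "v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // registry.example.com:5000/team/app:v1

	// Attach a digest instead; the result satisfies Canonical.
	// The hex value is a placeholder, not a real image digest.
	d, err := digest.ParseDigest("sha256:" + strings.Repeat("a", 64))
	if err != nil {
		panic(err)
	}
	canonical, err := reference.WithDigest(named, d)
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.Digest())

	// SplitHostname separates the registry part from the repository path.
	host, path := reference.SplitHostname(named)
	fmt.Println(host, path) // registry.example.com:5000 team/app
}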
+func TrimNamed(ref Named) Named { + return repository(ref.Name()) +} + +func getBestReferenceType(ref reference) Reference { + if ref.name == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + name: ref.name, + digest: ref.digest, + } + } + return repository(ref.name) + } + if ref.digest == "" { + return taggedReference{ + name: ref.name, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + name string + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.name + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Name() string { + return r.name +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository string + +func (r repository) String() string { + return string(r) +} + +func (r repository) Name() string { + return string(r) +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return string(d) +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + name string + tag string +} + +func (t taggedReference) String() string { + return t.name + ":" + t.tag +} + +func (t taggedReference) Name() string { + return t.name +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + name string + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.name + "@" + c.digest.String() +} + +func (c canonicalReference) Name() string { + return c.name +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go new file mode 100644 index 0000000000..9a7d366bc8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go @@ -0,0 +1,124 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp + // and followed by an optional port. + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // hostnameRegexp defines the structure of potential hostname components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
+ hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. + anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
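The helpers in this file compose small regular expressions into the exported Name, Tag, Digest and Reference patterns. A stripped-down, self-contained sketch of that combinator pattern follows; the helper bodies and the grammar here are simplified stand-ins, not the vendored definitions:

package main

import (
	"fmt"
	"regexp"
)

// group wraps the concatenated expressions in a non-capturing group,
// so each piece stays a readable *regexp.Regexp value.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	s := ""
	for _, re := range res {
		s += re.String()
	}
	return regexp.MustCompile(`(?:` + s + `)`)
}

// anchored pins the composed expression to the start and end of the input.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return regexp.MustCompile(`^` + group(res...).String() + `$`)
}

func main() {
	host := regexp.MustCompile(`[a-z0-9.]+(?::[0-9]+)?`)
	path := regexp.MustCompile(`(?:/[a-z0-9._-]+)+`)
	ref := anchored(host, path)

	fmt.Println(ref.MatchString("registry.example.com:5000/team/app")) // true
	fmt.Println(ref.MatchString("Registry.example.com/app"))           // false: uppercase
}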
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go new file mode 100644 index 0000000000..d7f398c19a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go @@ -0,0 +1,245 @@ +package reference + +import ( + "net" + "net/url" + "strings" + + "github.com/openshift/library-go/pkg/image/internal/digest" + "github.com/openshift/library-go/pkg/image/internal/reference" +) + +// DockerImageReference points to a Docker image. +type DockerImageReference struct { + Registry string + Namespace string + Name string + Tag string + ID string +} + +const ( + // DockerDefaultRegistry is the value for the registry when none was provided. + DockerDefaultRegistry = "docker.io" + // DockerDefaultV1Registry is the host name of the default v1 registry + DockerDefaultV1Registry = "index." + DockerDefaultRegistry + // DockerDefaultV2Registry is the host name of the default v2 registry + DockerDefaultV2Registry = "registry-1." + DockerDefaultRegistry +) + +// Parse parses a Docker pull spec string into a +// DockerImageReference. +func Parse(spec string) (DockerImageReference, error) { + var ref DockerImageReference + + namedRef, err := reference.ParseNamed(spec) + if err != nil { + return ref, err + } + + name := namedRef.Name() + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ":.") && name[:i] != "localhost") { + ref.Name = name + } else { + ref.Registry, ref.Name = name[:i], name[i+1:] + } + + if named, ok := namedRef.(reference.NamedTagged); ok { + ref.Tag = named.Tag() + } + + if named, ok := namedRef.(reference.Canonical); ok { + ref.ID = named.Digest().String() + } + + // It's not enough just to use the reference.ParseNamed(). We have to fill + // ref.Namespace from ref.Name + if i := strings.IndexRune(ref.Name, '/'); i != -1 { + ref.Namespace, ref.Name = ref.Name[:i], ref.Name[i+1:] + } + + return ref, nil +} + +// Equal returns true if the other DockerImageReference is equivalent to the +// reference r. The comparison applies defaults to the Docker image reference, +// so that e.g., "foobar" equals "docker.io/library/foobar:latest". +func (r DockerImageReference) Equal(other DockerImageReference) bool { + defaultedRef := r.DockerClientDefaults() + otherDefaultedRef := other.DockerClientDefaults() + return defaultedRef == otherDefaultedRef +} + +// DockerClientDefaults sets the default values used by the Docker client. +func (r DockerImageReference) DockerClientDefaults() DockerImageReference { + if len(r.Registry) == 0 { + r.Registry = DockerDefaultRegistry + } + if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) { + r.Namespace = "library" + } + if len(r.Tag) == 0 { + r.Tag = "latest" + } + return r +} + +// Minimal reduces a DockerImageReference to its minimalist form. +func (r DockerImageReference) Minimal() DockerImageReference { + if r.Tag == "latest" { + r.Tag = "" + } + return r +} + +// AsRepository returns the reference without tags or IDs. +func (r DockerImageReference) AsRepository() DockerImageReference { + r.Tag = "" + r.ID = "" + return r +} + +// RepositoryName returns the registry relative name +func (r DockerImageReference) RepositoryName() string { + r.Tag = "" + r.ID = "" + r.Registry = "" + return r.Exact() +} + +// RegistryHostPort returns the registry hostname and the port. 
+// If the port is not specified in the registry hostname we default to 443. +// This will also default to Docker client defaults if the registry hostname is empty. +func (r DockerImageReference) RegistryHostPort(insecure bool) (string, string) { + registryHost := r.AsV2().DockerClientDefaults().Registry + if strings.Contains(registryHost, ":") { + hostname, port, _ := net.SplitHostPort(registryHost) + return hostname, port + } + if insecure { + return registryHost, "80" + } + return registryHost, "443" +} + +// RepositoryName returns the registry relative name +func (r DockerImageReference) RegistryURL() *url.URL { + return &url.URL{ + Scheme: "https", + Host: r.AsV2().Registry, + } +} + +// DaemonMinimal clears defaults that Docker assumes. +func (r DockerImageReference) DaemonMinimal() DockerImageReference { + switch r.Registry { + case DockerDefaultV1Registry, DockerDefaultV2Registry: + r.Registry = DockerDefaultRegistry + } + if IsRegistryDockerHub(r.Registry) && r.Namespace == "library" { + r.Namespace = "" + } + return r.Minimal() +} + +func (r DockerImageReference) AsV2() DockerImageReference { + switch r.Registry { + case DockerDefaultV1Registry, DockerDefaultRegistry: + r.Registry = DockerDefaultV2Registry + } + return r +} + +// MostSpecific returns the most specific image reference that can be constructed from the +// current ref, preferring an ID over a Tag. Allows client code dealing with both tags and IDs +// to get the most specific reference easily. +func (r DockerImageReference) MostSpecific() DockerImageReference { + if len(r.ID) == 0 { + return r + } + if _, err := digest.ParseDigest(r.ID); err == nil { + r.Tag = "" + return r + } + if len(r.Tag) == 0 { + r.Tag, r.ID = r.ID, "" + return r + } + return r +} + +// NameString returns the name of the reference with its tag or ID. +func (r DockerImageReference) NameString() string { + switch { + case len(r.Name) == 0: + return "" + case len(r.Tag) > 0: + return r.Name + ":" + r.Tag + case len(r.ID) > 0: + var ref string + if _, err := digest.ParseDigest(r.ID); err == nil { + // if it parses as a digest, its v2 pull by id + ref = "@" + r.ID + } else { + // if it doesn't parse as a digest, it's presumably a v1 registry by-id tag + ref = ":" + r.ID + } + return r.Name + ref + default: + return r.Name + } +} + +// Exact returns a string representation of the set fields on the DockerImageReference +func (r DockerImageReference) Exact() string { + name := r.NameString() + if len(name) == 0 { + return name + } + s := r.Registry + if len(s) > 0 { + s += "/" + } + + if len(r.Namespace) != 0 { + s += r.Namespace + "/" + } + return s + name +} + +// String converts a DockerImageReference to a Docker pull spec (which implies a default namespace +// according to V1 Docker registry rules). Use Exact() if you want no defaulting. +func (r DockerImageReference) String() string { + if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) { + r.Namespace = "library" + } + return r.Exact() +} + +// IsRegistryDockerHub returns true if the given registry name belongs to +// Docker hub. +func IsRegistryDockerHub(registry string) bool { + switch registry { + case DockerDefaultRegistry, DockerDefaultV1Registry, DockerDefaultV2Registry: + return true + default: + return false + } +} + +// DeepCopyInto writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy copies the receiver, creating a new DockerImageReference. 
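A short usage sketch for this package; the pull specs are invented, and the expected outputs in the comments follow from reading the code above rather than from a test run:

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/image/reference"
)

func main() {
	ref, err := reference.Parse("quay.io/openshift/origin-cli:v4.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Registry)  // quay.io
	fmt.Println(ref.Namespace) // openshift
	fmt.Println(ref.Name)      // origin-cli
	fmt.Println(ref.Tag)       // v4.0

	// Equal applies Docker client defaults before comparing, so a bare
	// name is equivalent to its fully qualified docker.io form.
	a, _ := reference.Parse("busybox")
	b, _ := reference.Parse("docker.io/library/busybox:latest")
	fmt.Println(a.Equal(b)) // true

	// Exact keeps only what is set; String only applies the "library"
	// namespace default when the registry is a Docker Hub hostname.
	fmt.Println(a.Exact())  // busybox
	fmt.Println(a.String()) // busybox (no registry set, so no defaulting)
}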
+func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go b/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go new file mode 100644 index 0000000000..87f4cad5b8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go @@ -0,0 +1,63 @@ +package networkapihelpers + +import ( + "fmt" + "strings" + + networkv1 "github.com/openshift/api/network/v1" +) + +type PodNetworkAction string + +const ( + // Acceptable values for ChangePodNetworkAnnotation + GlobalPodNetwork PodNetworkAction = "global" + JoinPodNetwork PodNetworkAction = "join" + IsolatePodNetwork PodNetworkAction = "isolate" +) + +var ( + ErrorPodNetworkAnnotationNotFound = fmt.Errorf("ChangePodNetworkAnnotation not found") +) + +// GetChangePodNetworkAnnotation fetches network change intent from NetNamespace +func GetChangePodNetworkAnnotation(netns *networkv1.NetNamespace) (PodNetworkAction, string, error) { + value, ok := netns.Annotations[networkv1.ChangePodNetworkAnnotation] + if !ok { + return PodNetworkAction(""), "", ErrorPodNetworkAnnotationNotFound + } + + args := strings.Split(value, ":") + switch PodNetworkAction(args[0]) { + case GlobalPodNetwork: + return GlobalPodNetwork, "", nil + case JoinPodNetwork: + if len(args) != 2 { + return PodNetworkAction(""), "", fmt.Errorf("invalid namespace for join pod network: %s", value) + } + namespace := args[1] + return JoinPodNetwork, namespace, nil + case IsolatePodNetwork: + return IsolatePodNetwork, "", nil + } + + return PodNetworkAction(""), "", fmt.Errorf("invalid ChangePodNetworkAnnotation: %s", value) +} + +// SetChangePodNetworkAnnotation sets network change intent on NetNamespace +func SetChangePodNetworkAnnotation(netns *networkv1.NetNamespace, action PodNetworkAction, params string) { + if netns.Annotations == nil { + netns.Annotations = make(map[string]string) + } + + value := string(action) + if len(params) != 0 { + value = fmt.Sprintf("%s:%s", value, params) + } + netns.Annotations[networkv1.ChangePodNetworkAnnotation] = value +} + +// DeleteChangePodNetworkAnnotation removes network change intent from NetNamespace +func DeleteChangePodNetworkAnnotation(netns *networkv1.NetNamespace) { + delete(netns.Annotations, networkv1.ChangePodNetworkAnnotation) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go new file mode 100644 index 0000000000..9a773c2d4c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go @@ -0,0 +1,120 @@ +package certrotation + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/certs" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" +) + +// CABundleRotation maintains a 
CA bundle config map, but adding new CA certs and removing expired old ones. +type CABundleRotation struct { + Namespace string + Name string + + Informer corev1informers.ConfigMapInformer + Lister corev1listers.ConfigMapLister + Client corev1client.ConfigMapsGetter + EventRecorder events.Recorder +} + +func (c CABundleRotation) ensureConfigMapCABundle(signingCertKeyPair *crypto.CA) ([]*x509.Certificate, error) { + // by this point we have current signing cert/key pair. We now need to make sure that the ca-bundle configmap has this cert and + // doesn't have any expired certs + originalCABundleConfigMap, err := c.Lister.ConfigMaps(c.Namespace).Get(c.Name) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + caBundleConfigMap := originalCABundleConfigMap.DeepCopy() + if apierrors.IsNotFound(err) { + // create an empty one + caBundleConfigMap = &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} + } + updatedCerts, err := manageCABundleConfigMap(caBundleConfigMap, signingCertKeyPair.Config.Certs[0]) + if err != nil { + return nil, err + } + if originalCABundleConfigMap == nil || originalCABundleConfigMap.Data == nil || !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { + c.EventRecorder.Eventf("CABundleUpdateRequired", "%q in %q requires a new cert", c.Name, c.Namespace) + LabelAsManagedConfigMap(caBundleConfigMap, CertificateTypeCABundle) + + actualCABundleConfigMap, modified, err := resourceapply.ApplyConfigMap(c.Client, c.EventRecorder, caBundleConfigMap) + if err != nil { + return nil, err + } + if modified { + klog.V(2).Infof("Updated ca-bundle.crt configmap %s/%s with:\n%s", certs.CertificateBundleToString(updatedCerts), caBundleConfigMap.Namespace, caBundleConfigMap.Name) + } + + caBundleConfigMap = actualCABundleConfigMap + } + + caBundle := caBundleConfigMap.Data["ca-bundle.crt"] + if len(caBundle) == 0 { + return nil, fmt.Errorf("configmap/%s -n%s missing ca-bundle.crt", caBundleConfigMap.Name, caBundleConfigMap.Namespace) + } + certificates, err := cert.ParseCertsPEM([]byte(caBundle)) + if err != nil { + return nil, err + } + + return certificates, nil +} + +// manageCABundleConfigMap adds the new certificate to the list of cabundles, eliminates duplicates, and prunes the list of expired +// certs to trust as signers +func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner *x509.Certificate) ([]*x509.Certificate, error) { + if caBundleConfigMap.Data == nil { + caBundleConfigMap.Data = map[string]string{} + } + + certificates := []*x509.Certificate{} + caBundle := caBundleConfigMap.Data["ca-bundle.crt"] + if len(caBundle) > 0 { + var err error + certificates, err = cert.ParseCertsPEM([]byte(caBundle)) + if err != nil { + return nil, err + } + } + certificates = append([]*x509.Certificate{currentSigner}, certificates...) + certificates = crypto.FilterExpiredCerts(certificates...) + + finalCertificates := []*x509.Certificate{} + // now check for duplicates. n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) 
+ if err != nil { + return nil, err + } + + caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes) + + return finalCertificates, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go new file mode 100644 index 0000000000..8f75dc7db9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go @@ -0,0 +1,196 @@ +package certrotation + +import ( + "fmt" + "time" + + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +const ( + // CertificateNotBeforeAnnotation contains the certificate expiration date in RFC3339 format. + CertificateNotBeforeAnnotation = "auth.openshift.io/certificate-not-before" + // CertificateNotAfterAnnotation contains the certificate expiration date in RFC3339 format. + CertificateNotAfterAnnotation = "auth.openshift.io/certificate-not-after" + // CertificateIssuer contains the common name of the certificate that signed another certificate. + CertificateIssuer = "auth.openshift.io/certificate-issuer" + // CertificateHostnames contains the hostnames used by a signer. + CertificateHostnames = "auth.openshift.io/certificate-hostnames" +) + +const workQueueKey = "key" + +// CertRotationController does: +// +// 1) continuously create a self-signed signing CA (via SigningRotation). +// It creates the next one when a given percentage of the validity of the old CA has passed. +// 2) maintain a CA bundle with all not yet expired CA certs. +// 3) continuously create a target cert and key signed by the latest signing CA +// It creates the next one when a given percentage of the validity of the previous cert has +// passed, or when a new CA has been created. 
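A rough sketch of how a consumer might wire these three pieces into one controller. Every name, namespace, hostname and duration below is invented, and the kube client, informer factory, event recorder and operator client are assumed to be constructed elsewhere:

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/certrotation"
	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

// newExampleServingCertController wires a signer secret, a CA bundle
// configmap and a serving cert secret into one rotation controller.
// All resource names and durations are illustrative only.
func newExampleServingCertController(
	kubeClient kubernetes.Interface,
	kubeInformers informers.SharedInformerFactory, // assumed scoped to "openshift-example"
	operatorClient v1helpers.StaticPodOperatorClient,
	recorder events.Recorder,
) (*certrotation.CertRotationController, error) {
	secrets := kubeInformers.Core().V1().Secrets()
	configMaps := kubeInformers.Core().V1().ConfigMaps()

	return certrotation.NewCertRotationController(
		"ExampleServingCert",
		certrotation.SigningRotation{
			Namespace: "openshift-example", Name: "example-signer",
			Validity: 30 * 24 * time.Hour, Refresh: 15 * 24 * time.Hour,
			Informer: secrets, Lister: secrets.Lister(), Client: kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		certrotation.CABundleRotation{
			Namespace: "openshift-example", Name: "example-ca-bundle",
			Informer: configMaps, Lister: configMaps.Lister(), Client: kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		certrotation.TargetRotation{
			Namespace: "openshift-example", Name: "example-serving-cert",
			Validity: 15 * 24 * time.Hour, Refresh: 7 * 24 * time.Hour,
			CertCreator: &certrotation.ServingRotation{
				Hostnames: func() []string { return []string{"example.openshift-example.svc"} },
			},
			Informer: secrets, Lister: secrets.Lister(), Client: kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		operatorClient,
	)
}

Run() then drives the sync loop defined below, re-queuing once a minute and whenever one of the three informers fires an event.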
+type CertRotationController struct { + name string + + SigningRotation SigningRotation + CABundleRotation CABundleRotation + TargetRotation TargetRotation + OperatorClient v1helpers.StaticPodOperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface +} + +func NewCertRotationController( + name string, + signingRotation SigningRotation, + caBundleRotation CABundleRotation, + targetRotation TargetRotation, + operatorClient v1helpers.StaticPodOperatorClient, +) (*CertRotationController, error) { + c := &CertRotationController{ + name: name, + + SigningRotation: signingRotation, + CABundleRotation: caBundleRotation, + TargetRotation: targetRotation, + OperatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + } + + signingRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + caBundleRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + targetRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, signingRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, caBundleRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, targetRotation.Informer.Informer().HasSynced) + + return c, nil +} + +func (c CertRotationController) sync() error { + syncErr := c.syncWorker() + + condition := operatorv1.OperatorCondition{ + Type: "CertRotation_" + c.name + "_Degraded", + Status: operatorv1.ConditionFalse, + } + if syncErr != nil { + condition.Status = operatorv1.ConditionTrue + condition.Reason = "RotationError" + condition.Message = syncErr.Error() + } + if _, _, updateErr := v1helpers.UpdateStaticPodStatus(c.OperatorClient, v1helpers.UpdateStaticPodConditionFn(condition)); updateErr != nil { + return updateErr + } + + return syncErr +} + +func (c CertRotationController) syncWorker() error { + signingCertKeyPair, err := c.SigningRotation.ensureSigningCertKeyPair() + if err != nil { + return err + } + + cabundleCerts, err := c.CABundleRotation.ensureConfigMapCABundle(signingCertKeyPair) + if err != nil { + return err + } + + if err := c.TargetRotation.ensureTargetCertKeyPair(signingCertKeyPair, cabundleCerts); err != nil { + return err + } + + return nil +} + +func (c *CertRotationController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting CertRotationController - %q", c.name) + defer klog.Infof("Shutting down CertRotationController - %q", c.name) + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + // start a time based thread to ensure we stay up to date + go wait.Until(func() { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + c.queue.Add(workQueueKey) + select { + case <-ticker.C: + case <-stopCh: + return + } + } + + }, time.Minute, stopCh) + + // if we have a need to force rechecking the cert, use this channel to do it. 
+ if refresher, ok := c.TargetRotation.CertCreator.(TargetCertRechecker); ok { + targetRefresh := refresher.RecheckChannel() + go wait.Until(func() { + for { + select { + case <-targetRefresh: + c.queue.Add(workQueueKey) + case <-stopCh: + return + } + } + + }, time.Minute, stopCh) + } + + <-stopCh +} + +func (c *CertRotationController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *CertRotationController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v: %v failed with: %v", c.name, dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *CertRotationController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go new file mode 100644 index 0000000000..5f88d64063 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go @@ -0,0 +1,40 @@ +package certrotation + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +// GetCertRotationScale The normal scale is based on a day. The value returned by this function +// is used to scale rotation durations instead of a day, so you can set it shorter. +func GetCertRotationScale(client kubernetes.Interface, namespace string) (time.Duration, error) { + certRotationScale := time.Duration(0) + err := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) { + certRotationConfig, err := client.CoreV1().ConfigMaps(namespace).Get("unsupported-cert-rotation-config", metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return true, nil + } + return false, err + } + if value, ok := certRotationConfig.Data["base"]; ok { + certRotationScale, err = time.ParseDuration(value) + if err != nil { + return false, err + } + } + return true, nil + }) + if err != nil { + return 0, err + } + if certRotationScale > 24*time.Hour { + return 0, fmt.Errorf("scale longer than 24h is not allowed: %v", certRotationScale) + } + return certRotationScale, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go new file mode 100644 index 0000000000..9c0df4ce54 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go @@ -0,0 +1,61 @@ +package certrotation + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // ManagedCertificateTypeLabelName marks config map or secret as object that contains managed certificates. + // This groups all objects that store certs and allow easy query to get them all. + // The value of this label should be set to "true". 
+ ManagedCertificateTypeLabelName = "auth.openshift.io/managed-certificate-type" +) + +type CertificateType string + +var ( + CertificateTypeCABundle CertificateType = "ca-bundle" + CertificateTypeSigner CertificateType = "signer" + CertificateTypeTarget CertificateType = "target" + CertificateTypeUnknown CertificateType = "unknown" +) + +// LabelAsManagedConfigMap add label indicating the given config map contains certificates +// that are managed. +func LabelAsManagedConfigMap(config *v1.ConfigMap, certificateType CertificateType) { + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[ManagedCertificateTypeLabelName] = string(certificateType) +} + +// LabelAsManagedConfigMap add label indicating the given secret contains certificates +// that are managed. +func LabelAsManagedSecret(secret *v1.Secret, certificateType CertificateType) { + if secret.Labels == nil { + secret.Labels = map[string]string{} + } + secret.Labels[ManagedCertificateTypeLabelName] = string(certificateType) +} + +// CertificateTypeFromObject returns the CertificateType based on the annotations of the object. +func CertificateTypeFromObject(obj runtime.Object) (CertificateType, error) { + accesor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + actualLabels := accesor.GetLabels() + if actualLabels == nil { + return CertificateTypeUnknown, nil + } + + t := CertificateType(actualLabels[ManagedCertificateTypeLabelName]) + switch t { + case CertificateTypeCABundle, CertificateTypeSigner, CertificateTypeTarget: + return t, nil + default: + return CertificateTypeUnknown, nil + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go new file mode 100644 index 0000000000..56ce2a3307 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go @@ -0,0 +1,136 @@ +package certrotation + +import ( + "bytes" + "fmt" + "time" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// SigningRotation rotates a self-signed signing CA stored in a secret. It creates a new one when +// of the lifetime of the old CA has passed. 
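A minimal sketch of the label helpers defined in label.go above; the secret is a throwaway in-memory object, not tied to any real operator:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/library-go/pkg/operator/certrotation"
)

func main() {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-example", Name: "example-signer"},
	}

	// Mark the secret as holding a managed signer certificate.
	certrotation.LabelAsManagedSecret(secret, certrotation.CertificateTypeSigner)

	// Later, consumers can recover the certificate type from the label.
	t, err := certrotation.CertificateTypeFromObject(secret)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // signer
}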
+type SigningRotation struct { + Namespace string + Name string + Validity time.Duration + Refresh time.Duration + + Informer corev1informers.SecretInformer + Lister corev1listers.SecretLister + Client corev1client.SecretsGetter + EventRecorder events.Recorder +} + +func (c SigningRotation) ensureSigningCertKeyPair() (*crypto.CA, error) { + originalSigningCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + signingCertKeyPairSecret := originalSigningCertKeyPairSecret.DeepCopy() + if apierrors.IsNotFound(err) { + // create an empty one + signingCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} + } + signingCertKeyPairSecret.Type = corev1.SecretTypeTLS + + if reason := needNewSigningCertKeyPair(signingCertKeyPairSecret.Annotations, c.Refresh); len(reason) > 0 { + c.EventRecorder.Eventf("SignerUpdateRequired", "%q in %q requires a new signing cert/key pair: %v", c.Name, c.Namespace, reason) + if err := setSigningCertKeyPairSecret(signingCertKeyPairSecret, c.Validity); err != nil { + return nil, err + } + + LabelAsManagedSecret(signingCertKeyPairSecret, CertificateTypeSigner) + + actualSigningCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, signingCertKeyPairSecret) + if err != nil { + return nil, err + } + signingCertKeyPairSecret = actualSigningCertKeyPairSecret + } + // at this point, the secret has the correct signer, so we should read that signer to be able to sign + signingCertKeyPair, err := crypto.GetCAFromBytes(signingCertKeyPairSecret.Data["tls.crt"], signingCertKeyPairSecret.Data["tls.key"]) + if err != nil { + return nil, err + } + + return signingCertKeyPair, nil +} + +func needNewSigningCertKeyPair(annotations map[string]string, refresh time.Duration) string { + notBefore, notAfter, reason := getValidityFromAnnotations(annotations) + if len(reason) > 0 { + return reason + } + + maxWait := notAfter.Sub(notBefore) / 5 + latestTime := notAfter.Add(-maxWait) + if time.Now().After(latestTime) { + return fmt.Sprintf("past its latest possible time %v", latestTime) + } + + refreshTime := notBefore.Add(refresh) + if time.Now().After(refreshTime) { + return fmt.Sprintf("past its refresh time %v", refreshTime) + } + + return "" +} + +func getValidityFromAnnotations(annotations map[string]string) (notBefore time.Time, notAfter time.Time, reason string) { + notAfterString := annotations[CertificateNotAfterAnnotation] + if len(notAfterString) == 0 { + return notBefore, notAfter, "missing notAfter" + } + notAfter, err := time.Parse(time.RFC3339, notAfterString) + if err != nil { + return notBefore, notAfter, fmt.Sprintf("bad expiry: %q", notAfterString) + } + notBeforeString := annotations[CertificateNotBeforeAnnotation] + if len(notAfterString) == 0 { + return notBefore, notAfter, "missing notBefore" + } + notBefore, err = time.Parse(time.RFC3339, notBeforeString) + if err != nil { + return notBefore, notAfter, fmt.Sprintf("bad expiry: %q", notBeforeString) + } + + return notBefore, notAfter, "" +} + +// setSigningCertKeyPairSecret creates a new signing cert/key pair and sets them in the secret +func setSigningCertKeyPairSecret(signingCertKeyPairSecret *corev1.Secret, validity time.Duration) error { + signerName := fmt.Sprintf("%s_%s@%d", signingCertKeyPairSecret.Namespace, signingCertKeyPairSecret.Name, time.Now().Unix()) + ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity) + if err != nil { + 
return err + } + + certBytes := &bytes.Buffer{} + keyBytes := &bytes.Buffer{} + if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil { + return err + } + + if signingCertKeyPairSecret.Annotations == nil { + signingCertKeyPairSecret.Annotations = map[string]string{} + } + if signingCertKeyPairSecret.Data == nil { + signingCertKeyPairSecret.Data = map[string][]byte{} + } + signingCertKeyPairSecret.Data["tls.crt"] = certBytes.Bytes() + signingCertKeyPairSecret.Data["tls.key"] = keyBytes.Bytes() + signingCertKeyPairSecret.Annotations[CertificateNotAfterAnnotation] = ca.Certs[0].NotAfter.Format(time.RFC3339) + signingCertKeyPairSecret.Annotations[CertificateNotBeforeAnnotation] = ca.Certs[0].NotBefore.Format(time.RFC3339) + signingCertKeyPairSecret.Annotations[CertificateIssuer] = ca.Certs[0].Issuer.CommonName + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go new file mode 100644 index 0000000000..48d6efd8d5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go @@ -0,0 +1,267 @@ +package certrotation + +import ( + "crypto/x509" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + + "github.com/openshift/library-go/pkg/certs" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// TargetRotation rotates a key and cert signed by a CA. It creates a new one when +// of the lifetime of the old cert has passed, or if the common name of the CA changes. +type TargetRotation struct { + Namespace string + Name string + Validity time.Duration + Refresh time.Duration + + CertCreator TargetCertCreator + + Informer corev1informers.SecretInformer + Lister corev1listers.SecretLister + Client corev1client.SecretsGetter + EventRecorder events.Recorder +} + +type TargetCertCreator interface { + NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) + NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string + // SetAnnotations gives an option to override or set additional annotations + SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string +} + +type TargetCertRechecker interface { + RecheckChannel() <-chan struct{} +} + +func (c TargetRotation) ensureTargetCertKeyPair(signingCertKeyPair *crypto.CA, caBundleCerts []*x509.Certificate) error { + // at this point our trust bundle has been updated. We don't know for sure that consumers have updated, but that's why we have a second + // validity percentage. We always check to see if we need to sign. Often we are signing with an old key or we have no target + // and need to mint one + // TODO do the cross signing thing, but this shows the API consumers want and a very simple impl. 
+ originalTargetCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + targetCertKeyPairSecret := originalTargetCertKeyPairSecret.DeepCopy() + if apierrors.IsNotFound(err) { + // create an empty one + targetCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} + } + targetCertKeyPairSecret.Type = corev1.SecretTypeTLS + + if reason := needNewTargetCertKeyPair(targetCertKeyPairSecret.Annotations, signingCertKeyPair, caBundleCerts, c.Refresh); len(reason) > 0 { + c.EventRecorder.Eventf("TargetUpdateRequired", "%q in %q requires a new target cert/key pair: %v", c.Name, c.Namespace, reason) + if err := setTargetCertKeyPairSecret(targetCertKeyPairSecret, c.Validity, signingCertKeyPair, c.CertCreator); err != nil { + return err + } + + LabelAsManagedSecret(targetCertKeyPairSecret, CertificateTypeTarget) + + actualTargetCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, targetCertKeyPairSecret) + if err != nil { + return err + } + targetCertKeyPairSecret = actualTargetCertKeyPairSecret + } + + return nil +} + +func needNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string { + if reason := needNewTargetCertKeyPairForTime(annotations, signer, refresh); len(reason) > 0 { + return reason + } + + // check the signer common name against all the common names in our ca bundle so we don't refresh early + signerCommonName := annotations[CertificateIssuer] + if len(signerCommonName) == 0 { + return "missing issuer name" + } + for _, caCert := range caBundleCerts { + if signerCommonName == caCert.Subject.CommonName { + return "" + } + } + + return fmt.Sprintf("issuer %q, not in ca bundle:\n%s", signerCommonName, certs.CertificateBundleToString(caBundleCerts)) +} + +// needNewTargetCertKeyPairForTime returns true when +// 1. when notAfter or notBefore is missing in the annotation +// 2. when notAfter or notBefore is malformed +// 3. when now is after the notAfter +// 4. when now is after notAfter+refresh AND the signer has been valid +// for more than 5% of the "extra" time we renew the target +// +//in other words, we rotate if +// +//our old CA is gone from the bundle (then we are pretty late to the renewal party) +//or the cert expired (then we are also pretty late) +//or we are over the renewal percentage of the validity, but only if the new CA at least 10% into its age. +//Maybe worth a go doc. +// +//So in general we need to see a signing CA at least aged 10% within 1-percentage of the cert validity. +// +//Hence, if the CAs are rotated too fast (like CA percentage around 10% or smaller), we will not hit the time to make use of the CA. Or if the cert renewal percentage is at 90%, there is not much time either. +// +//So with a cert percentage of 75% and equally long CA and cert validities at the worst case we start at 85% of the cert to renew, trying again every minute. +func needNewTargetCertKeyPairForTime(annotations map[string]string, signer *crypto.CA, refresh time.Duration) string { + notBefore, notAfter, reason := getValidityFromAnnotations(annotations) + if len(reason) > 0 { + return reason + } + + maxWait := notAfter.Sub(notBefore) / 5 + latestTime := notAfter.Add(-maxWait) + if time.Now().After(latestTime) { + return fmt.Sprintf("past its latest possible time %v", latestTime) + } + + // If Certificate is past its refresh time, we may have action to take. 
We only do this if the signer is old enough. + refreshTime := notBefore.Add(refresh) + if time.Now().After(refreshTime) { + // make sure the signer has been valid for more than 10% of the target's refresh time. + timeToWaitForTrustRotation := refresh / 10 + if time.Now().After(signer.Config.Certs[0].NotBefore.Add(time.Duration(timeToWaitForTrustRotation))) { + return fmt.Sprintf("past its refresh time %v", refreshTime) + } + } + + return "" +} + +// setTargetCertKeyPairSecret creates a new cert/key pair and sets them in the secret. Only one of client, serving, or signer rotation may be specified. +// TODO refactor with an interface for actually signing and move the one-of check higher in the stack. +func setTargetCertKeyPairSecret(targetCertKeyPairSecret *corev1.Secret, validity time.Duration, signer *crypto.CA, certCreator TargetCertCreator) error { + if targetCertKeyPairSecret.Annotations == nil { + targetCertKeyPairSecret.Annotations = map[string]string{} + } + if targetCertKeyPairSecret.Data == nil { + targetCertKeyPairSecret.Data = map[string][]byte{} + } + + // our annotation is based on our cert validity, so we want to make sure that we don't specify something past our signer + targetValidity := validity + remainingSignerValidity := signer.Config.Certs[0].NotAfter.Sub(time.Now()) + if remainingSignerValidity < validity { + targetValidity = remainingSignerValidity + } + + certKeyPair, err := certCreator.NewCertificate(signer, targetValidity) + if err != nil { + return err + } + + targetCertKeyPairSecret.Data["tls.crt"], targetCertKeyPairSecret.Data["tls.key"], err = certKeyPair.GetPEMBytes() + if err != nil { + return err + } + targetCertKeyPairSecret.Annotations[CertificateNotAfterAnnotation] = certKeyPair.Certs[0].NotAfter.Format(time.RFC3339) + targetCertKeyPairSecret.Annotations[CertificateNotBeforeAnnotation] = certKeyPair.Certs[0].NotBefore.Format(time.RFC3339) + targetCertKeyPairSecret.Annotations[CertificateIssuer] = certKeyPair.Certs[0].Issuer.CommonName + certCreator.SetAnnotations(certKeyPair, targetCertKeyPairSecret.Annotations) + + return nil +} + +type ClientRotation struct { + UserInfo user.Info +} + +func (r *ClientRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + return signer.MakeClientCertificateForDuration(r.UserInfo, validity) +} + +func (r *ClientRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string { + return needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh) +} + +func (r *ClientRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + return annotations +} + +type ServingRotation struct { + Hostnames ServingHostnameFunc + CertificateExtensionFn []crypto.CertificateExtensionFunc + HostnamesChanged <-chan struct{} +} + +func (r *ServingRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + if len(r.Hostnames()) == 0 { + return nil, fmt.Errorf("no hostnames set") + } + return signer.MakeServerCertForDuration(sets.NewString(r.Hostnames()...), validity, r.CertificateExtensionFn...) 
+} + +func (r *ServingRotation) RecheckChannel() <-chan struct{} { + return r.HostnamesChanged +} + +func (r *ServingRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string { + reason := needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh) + if len(reason) > 0 { + return reason + } + + return r.missingHostnames(annotations) +} + +func (r *ServingRotation) missingHostnames(annotations map[string]string) string { + existingHostnames := sets.NewString(strings.Split(annotations[CertificateHostnames], ",")...) + requiredHostnames := sets.NewString(r.Hostnames()...) + if !existingHostnames.Equal(requiredHostnames) { + existingNotRequired := existingHostnames.Difference(requiredHostnames) + requiredNotExisting := requiredHostnames.Difference(existingHostnames) + return fmt.Sprintf("%q are existing and not required, %q are required and not existing", strings.Join(existingNotRequired.List(), ","), strings.Join(requiredNotExisting.List(), ",")) + } + + return "" +} + +func (r *ServingRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + hostnames := sets.String{} + for _, ip := range cert.Certs[0].IPAddresses { + hostnames.Insert(ip.String()) + } + for _, dnsName := range cert.Certs[0].DNSNames { + hostnames.Insert(dnsName) + } + + // List does a sort so that we have a consistent representation + annotations[CertificateHostnames] = strings.Join(hostnames.List(), ",") + return annotations +} + +type ServingHostnameFunc func() []string + +type SignerRotation struct { + SignerName string +} + +func (r *SignerRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + signerName := fmt.Sprintf("%s_@%d", r.SignerName, time.Now().Unix()) + return crypto.MakeCAConfigForDuration(signerName, validity, signer) +} + +func (r *SignerRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string { + return needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh) +} + +func (r *SignerRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + return annotations +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt new file mode 100644 index 0000000000..b6140c7abb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICMjCCAdmgAwIBAgIUdTpx2/qycBZJltbEdfTyfKyJjG0wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTIwODAwWhcN +MTgwNzMwMTIwOTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABMlJR5tWK7vgCytCxBQov1xNp+R9RG2wI1w9 +SXIn+Za97Nf6krdyUDd+P6QSSJDkRTQZDsGiCpJhgd5kAzFNUkajgZgwgZUwDgYD +VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw +HQYDVR0OBBYEFOERFpshmIXspqXoox9gnSFGmm3PMB8GA1UdIwQYMBaAFCtdC7xd +NJKjmyiwhZJH7LBLOLrgMCAGA1UdEQQZMBeCFWV0Y2Rwcm94eS10ZXN0cy5sb2Nh +bDAKBggqhkjOPQQDAgNHADBEAiAvsq9L5uk0jg3v2z1xemAUwPXrEIAcbJhXFfC0 +QmVGGgIgFT9d/inKJcm/NfAgDGkoXSvHGv0NKAZpR32Dqriobh4= +-----END 
CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt new file mode 100644 index 0000000000..b321982a74 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUQ0hq1Lmd6ujao+8Iy6LfpMdyNI8wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMDAwWhcN +MjMwNzI5MTExMDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABGoowUY2eQdvaHG4S/UMYD6mjs6/P7mmhizl +KWO03gq2eVSsbiYAnCJok3o2WQ01GtcS6bOUJ1DOG0gLTRfQ/lWjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQmqCeN+suT +0JjgSxtCqTln7zonHjAfBgNVHSMEGDAWgBQmqCeN+suT0JjgSxtCqTln7zonHjAK +BggqhkjOPQQDAgNIADBFAiAUKV8vkiIoCiqtHQsp3PrUUV3He2B9K1tQgA8loTa+ +IQIhANPbCDVoPSFsX0I5iG/DQl/MmTo/tlsmNkN99j1j2JIM +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUU8ZsD37pcA1UYkgwhR6d/KjdGeAwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABLupsOF50q6GE7z2US77t5iLGe9wdOFwHssC +jUjCEGvJ/d2sGMxdiABJrrB8gau6TilrJCy9ZTYj56fzdReUnsKjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRhaKyklrnI +wd2kg84t1D8CvDVtdjAfBgNVHSMEGDAWgBRhaKyklrnIwd2kg84t1D8CvDVtdjAK +BggqhkjOPQQDAgNIADBFAiAOCYqtOamRapNc+XxR7IFzlr7Si7EvjQ+ej5SKHb7g +rgIhAIBd1dtMc0KJSFsoxnQZailkFi5Nlea2eHU1wEDKVb40 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/zCCAaagAwIBAgIUVCSMefpK8uxDKy87jKnwc97DseIwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABOhGVuxW0nEQ5REqQdRF1eJ7OUOdXB/oDJed +Jr1ezcyhJyCRvD9DfadSBvMHFyzw7ssBIIMm4C3Eufj96M3tSACjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBTLR9qOF3Hh +if8KUbkrRYUK13xSSDAfBgNVHSMEGDAWgBTLR9qOF3Hhif8KUbkrRYUK13xSSDAK +BggqhkjOPQQDAgNHADBEAiAFD2zRXnp40wVeffwpkU+ToFF6Nts/HJk02iMr/+km +RgIgRLZxonlkyLlUHucMKC2V+4UJ9akEbu/bhCXKuQb2DgY= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt new file mode 100644 index 0000000000..862bdbc2df --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUWke4fSfaCH+2MLSFeTHBpoi+h1YwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTA1MDAwWhcN +MjMwNzI5MTA1MDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABHoqBfTXFdWRATfdrr/v5UriZBxmzL5aiwLZ +VRUg2UZNnoH2JLUcDkqx3IQakjoVijweiQeqxAai3mxjtgxbh+ajZjBkMA4GA1Ud 
+DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgDE3RpOiH +Gv7AEnYKRk46zVIkbzAfBgNVHSMEGDAWgBSgDE3RpOiHGv7AEnYKRk46zVIkbzAK +BggqhkjOPQQDAgNIADBFAiA3Gg/gwiEfjclpQYyd3qTgdCWzud8GKRdjVK3Z2BXW +swIhANMuxi0Y41mwcmh3a2icpdeGHGyGNdNDe8uF+5csuNUp +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key new file mode 100644 index 0000000000..83cf18be62 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIC+UyR59JEbt/qjWZG/87ZYzk0pOgTBmpx5R0w6uG66JoAoGCCqGSM49 +AwEHoUQDQgAEeioF9NcV1ZEBN92uv+/lSuJkHGbMvlqLAtlVFSDZRk2egfYktRwO +SrHchBqSOhWKPB6JB6rEBqLebGO2DFuH5g== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go new file mode 100644 index 0000000000..65846c975a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go @@ -0,0 +1,157 @@ +package cloudprovider + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" +) + +const ( + cloudProviderConfFilePath = "/etc/kubernetes/static-pod-resources/configmaps/cloud-config/%s" + configNamespace = "openshift-config" +) + +// InfrastructureLister lists infrastrucre information and allows resources to be synced +type InfrastructureLister interface { + InfrastructureLister() configlistersv1.InfrastructureLister + ResourceSyncer() resourcesynccontroller.ResourceSyncer +} + +// NewCloudProviderObserver returns a new cloudprovider observer for syncing cloud provider specific +// information to controller-manager and api-server. +func NewCloudProviderObserver(targetNamespaceName string, cloudProviderNamePath, cloudProviderConfigPath []string) configobserver.ObserveConfigFunc { + cloudObserver := &cloudProviderObserver{ + targetNamespaceName: targetNamespaceName, + cloudProviderNamePath: cloudProviderNamePath, + cloudProviderConfigPath: cloudProviderConfigPath, + } + return cloudObserver.ObserveCloudProviderNames +} + +type cloudProviderObserver struct { + targetNamespaceName string + cloudProviderNamePath []string + cloudProviderConfigPath []string +} + +// ObserveCloudProviderNames observes the cloud provider from the global cluster infrastructure resource. +func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(InfrastructureLister) + var errs []error + cloudProvidersPath := c.cloudProviderNamePath + cloudProviderConfPath := c.cloudProviderConfigPath + previouslyObservedConfig := map[string]interface{}{} + + existingCloudConfig, _, err := unstructured.NestedStringSlice(existingConfig, cloudProviderConfPath...) 
+ if err != nil { + return previouslyObservedConfig, append(errs, err) + } + + if currentCloudProvider, _, _ := unstructured.NestedStringSlice(existingConfig, cloudProvidersPath...); len(currentCloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, currentCloudProvider, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + if len(existingCloudConfig) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, existingCloudConfig, cloudProviderConfPath...); err != nil { + errs = append(errs, err) + } + } + + observedConfig := map[string]interface{}{} + + infrastructure, err := listers.InfrastructureLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveCloudProviderNames", "Required infrastructures.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + return previouslyObservedConfig, errs + } + + cloudProvider := getPlatformName(infrastructure.Status.Platform, recorder) + if len(cloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(observedConfig, []string{cloudProvider}, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + sourceCloudConfigMap := infrastructure.Spec.CloudConfig.Name + sourceCloudConfigNamespace := configNamespace + sourceLocation := resourcesynccontroller.ResourceLocation{ + Namespace: sourceCloudConfigNamespace, + Name: sourceCloudConfigMap, + } + // we set cloudprovider configmap values only for vsphere. + if cloudProvider != "vsphere" { + sourceCloudConfigMap = "" + } + + if len(sourceCloudConfigMap) == 0 { + sourceLocation = resourcesynccontroller.ResourceLocation{} + } + + err = listers.ResourceSyncer().SyncConfigMap( + resourcesynccontroller.ResourceLocation{ + Namespace: c.targetNamespaceName, + Name: "cloud-config", + }, + sourceLocation) + + if err != nil { + errs = append(errs, err) + return observedConfig, errs + } + + if len(sourceCloudConfigMap) == 0 { + return observedConfig, errs + } + + // usually key will be simply config but we should refer it just in case + staticCloudConfFile := fmt.Sprintf(cloudProviderConfFilePath, infrastructure.Spec.CloudConfig.Key) + + if err := unstructured.SetNestedStringSlice(observedConfig, []string{staticCloudConfFile}, cloudProviderConfPath...); err != nil { + recorder.Warningf("ObserveCloudProviderNames", "Failed setting cloud-config : %v", err) + errs = append(errs, err) + } + + if !equality.Semantic.DeepEqual(existingCloudConfig, []string{staticCloudConfFile}) { + recorder.Eventf("ObserveCloudProviderNamesChanges", "CloudProvider config file changed to %s", staticCloudConfFile) + } + + return observedConfig, errs +} + +func getPlatformName(platformType configv1.PlatformType, recorder events.Recorder) string { + cloudProvider := "" + switch platformType { + case "": + recorder.Warningf("ObserveCloudProvidersFailed", "Required status.platform field is not set in infrastructures.%s/cluster", configv1.GroupName) + case configv1.AWSPlatformType: + cloudProvider = "aws" + case configv1.AzurePlatformType: + cloudProvider = "azure" + case configv1.VSpherePlatformType: + cloudProvider = "vsphere" + case configv1.LibvirtPlatformType: + case configv1.OpenStackPlatformType: + // TODO(flaper87): Enable this once we've figured out a way to write the cloud provider config in the master nodes + //cloudProvider = "openstack" + case configv1.NonePlatformType: + default: + // the new doc on the infrastructure fields requires that we treat an unrecognized thing 
the same bare metal. + // TODO find a way to indicate to the user that we didn't honor their choice + recorder.Warningf("ObserveCloudProvidersFailed", fmt.Sprintf("No recognized cloud provider platform found in infrastructures.%s/cluster.status.platform", configv1.GroupName)) + } + return cloudProvider +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go new file mode 100644 index 0000000000..2c566647fd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -0,0 +1,192 @@ +package configobserver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/imdario/mergo" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/rand" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const operatorStatusTypeConfigObservationDegraded = "ConfigObservationDegraded" +const configObserverWorkKey = "key" + +// Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type +type Listers interface { + // ResourceSyncer can be used to copy content from one namespace to another + ResourceSyncer() resourcesynccontroller.ResourceSyncer + PreRunHasSynced() []cache.InformerSynced +} + +// ObserveConfigFunc observes configuration and returns the observedConfig. This function should not return an +// observedConfig that would cause the service being managed by the operator to crash. For example, if a required +// configuration key cannot be observed, consider reusing the configuration key's previous value. Errors that occur +// while attempting to generate the observedConfig should be returned in the errs slice. +type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) + +type ConfigObserver struct { + + // observers are called in an undefined order and their results are merged to + // determine the observed configuration. + observers []ObserveConfigFunc + + operatorClient v1helpers.OperatorClient + // listers are used by config observers to retrieve necessary resources + listers Listers + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +func NewConfigObserver( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, + listers Listers, + observers ...ObserveConfigFunc, +) *ConfigObserver { + return &ConfigObserver{ + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("config-observer"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ConfigObserver"), + + observers: observers, + listers: listers, + } +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. 
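For orientation, the ObserveConfigFunc contract described above (never emit a config that could break the operand; fall back to the previously observed value when a lookup fails) is easiest to see in a tiny observer. This is only a sketch, not part of the vendored package; the "logging.level" path and the observer name are hypothetical:

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/openshift/library-go/pkg/operator/configobserver"
	"github.com/openshift/library-go/pkg/operator/events"
)

// logLevelPath is a hypothetical location inside the operand configuration.
var logLevelPath = []string{"logging", "level"}

// ObserveLogLevel carries the previously observed value forward and only overwrites
// it when a new value is available, so a failed lookup never clears the field.
func ObserveLogLevel(listers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
	errs := []error{}
	observed := map[string]interface{}{}

	// Default to whatever was observed last time.
	if prev, _, err := unstructured.NestedString(existingConfig, logLevelPath...); err != nil {
		errs = append(errs, err)
	} else if len(prev) > 0 {
		if err := unstructured.SetNestedField(observed, prev, logLevelPath...); err != nil {
			errs = append(errs, err)
		}
	}

	// A real observer would hard-cast `listers`, look up the desired value and
	// overwrite the field in `observed` here.
	return observed, errs
}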
+func (c ConfigObserver) sync() error { + originalSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + spec := originalSpec.DeepCopy() + + // don't worry about errors. If we can't decode, we'll simply stomp over the field. + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(spec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil { + klog.V(4).Infof("decode of existing config failed with error: %v", err) + } + + var errs []error + var observedConfigs []map[string]interface{} + for _, i := range rand.Perm(len(c.observers)) { + var currErrs []error + observedConfig, currErrs := c.observers[i](c.listers, c.eventRecorder, existingConfig) + observedConfigs = append(observedConfigs, observedConfig) + errs = append(errs, currErrs...) + } + + mergedObservedConfig := map[string]interface{}{} + for _, observedConfig := range observedConfigs { + if err := mergo.Merge(&mergedObservedConfig, observedConfig); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + reverseMergedObservedConfig := map[string]interface{}{} + for i := len(observedConfigs) - 1; i >= 0; i-- { + if err := mergo.Merge(&reverseMergedObservedConfig, observedConfigs[i]); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + if !equality.Semantic.DeepEqual(mergedObservedConfig, reverseMergedObservedConfig) { + errs = append(errs, errors.New("non-deterministic config observation detected")) + } + + if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) { + c.eventRecorder.Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig)) + if _, _, err := v1helpers.UpdateSpec(c.operatorClient, v1helpers.UpdateObservedConfigFn(mergedObservedConfig)); err != nil { + // At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers + // but instead reset the errors and only report single error condition. + errs = []error{fmt.Errorf("error writing updated observed config: %v", err)} + c.eventRecorder.Warningf("ObservedConfigWriteError", "Failed to write observed config: %v", err) + } + } + configError := v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: operatorStatusTypeConfigObservationDegraded, + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c *ConfigObserver) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting ConfigObserver") + defer klog.Infof("Shutting down ConfigObserver") + if !cache.WaitForCacheSync(stopCh, c.listers.PreRunHasSynced()...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *ConfigObserver) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ConfigObserver) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ConfigObserver) EventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(configObserverWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(configObserverWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(configObserverWorkKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go new file mode 100644 index 0000000000..dd27886694 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -0,0 +1,97 @@ +package featuregates + +import ( + "fmt" + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +type FeatureGateLister interface { + FeatureGateLister() configlistersv1.FeatureGateLister +} + +func NewObserveFeatureFlagsFunc(knownFeatures sets.String, configPath []string) configobserver.ObserveConfigFunc { + return (&featureFlags{ + allowAll: len(knownFeatures) == 0, + knownFeatures: knownFeatures, + configPath: configPath, + }).ObserveFeatureFlags +} + +type featureFlags struct { + allowAll bool + knownFeatures sets.String + configPath []string +} + +// ObserveFeatureFlags fills in --feature-flags for the kube-apiserver +func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(FeatureGateLister) + errs := []error{} + prevObservedConfig := map[string]interface{}{} + + currentConfigValue, _, err := unstructured.NestedStringSlice(existingConfig, f.configPath...) 
+ if err != nil { + errs = append(errs, err) + } + if len(currentConfigValue) > 0 { + if err := unstructured.SetNestedStringSlice(prevObservedConfig, currentConfigValue, f.configPath...); err != nil { + errs = append(errs, err) + } + } + + observedConfig := map[string]interface{}{} + configResource, err := listers.FeatureGateLister().Get("cluster") + // if we have no featuregate, then the installer and MCO probably still have way to reconcile certain custom resources + // we will assume that this means the same as default and hope for the best + if apierrors.IsNotFound(err) { + configResource = &configv1.FeatureGate{ + Spec: configv1.FeatureGateSpec{ + FeatureSet: configv1.Default, + }, + } + } else if err != nil { + errs = append(errs, err) + return prevObservedConfig, errs + } + + var newConfigValue []string + if featureSet, ok := configv1.FeatureSets[configResource.Spec.FeatureSet]; ok { + for _, enable := range featureSet.Enabled { + // only add whitelisted feature flags + if !f.allowAll && !f.knownFeatures.Has(enable) { + continue + } + newConfigValue = append(newConfigValue, enable+"=true") + } + for _, disable := range featureSet.Disabled { + // only add whitelisted feature flags + if !f.allowAll && !f.knownFeatures.Has(disable) { + continue + } + newConfigValue = append(newConfigValue, disable+"=false") + } + } else { + errs = append(errs, fmt.Errorf(".spec.featureSet %q not found", featureSet)) + return prevObservedConfig, errs + } + if !reflect.DeepEqual(currentConfigValue, newConfigValue) { + recorder.Eventf("ObserveFeatureFlagsUpdated", "Updated %v to %s", strings.Join(f.configPath, "."), strings.Join(newConfigValue, ",")) + } + + if err := unstructured.SetNestedStringSlice(observedConfig, newConfigValue, f.configPath...); err != nil { + recorder.Warningf("ObserveFeatureFlags", "Failed setting %v: %v", strings.Join(f.configPath, "."), err) + errs = append(errs, err) + } + + return observedConfig, errs +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS new file mode 100644 index 0000000000..ce2862b870 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS @@ -0,0 +1,10 @@ +reviewers: + - squeed + - dcbw + - danwinship + - knobunc +approvers: + - squeed + - dcbw + - danwinship + - knobunc \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go new file mode 100644 index 0000000000..5a3f937dd5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go @@ -0,0 +1,59 @@ +package network + +import ( + "fmt" + + configv1 "github.com/openshift/api/config" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// GetClusterCIDRs reads the cluster CIDRs from the global network configuration resource. Emits events if CIDRs are not found. 
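The feature-gate observer above turns the cluster FeatureGate resource into key=true/key=false pairs, filtered by a whitelist of known gates. A minimal wiring sketch, not part of this patch, with purely illustrative gate names and config path:

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"

	"github.com/openshift/library-go/pkg/operator/configobserver"
	"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
)

// newFeatureGateObserver limits observation to two made-up gates and writes the
// result under apiServerArguments.feature-gates; an empty set would allow all gates.
func newFeatureGateObserver() configobserver.ObserveConfigFunc {
	knownFeatures := sets.NewString("HypotheticalGateA", "HypotheticalGateB")
	return featuregates.NewObserveFeatureFlagsFunc(
		knownFeatures,
		[]string{"apiServerArguments", "feature-gates"},
	)
}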
+func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveRestrictedCIDRFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("ObserveRestrictedCIDRFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if len(network.Status.ClusterNetwork) == 0 { + recorder.Warningf("ObserveClusterCIDRFailed", "Required status.clusterNetwork field is not set in networks.%s/cluster", configv1.GroupName) + return nil, fmt.Errorf("networks.%s/cluster: status.clusterNetwork not found", configv1.GroupName) + } + + var clusterCIDRs []string + for i, clusterNetwork := range network.Status.ClusterNetwork { + if len(clusterNetwork.CIDR) == 0 { + recorder.Warningf("ObserveRestrictedCIDRFailed", "Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster", i, configv1.GroupName) + return nil, fmt.Errorf("networks.%s/cluster: status.clusterNetwork[%d].cidr not found", configv1.GroupName, i) + } + clusterCIDRs = append(clusterCIDRs, clusterNetwork.CIDR) + } + // TODO fallback to podCIDR? is that still a thing? + return clusterCIDRs, nil +} + +// GetServiceCIDR reads the service IP range from the global network configuration resource. Emits events if CIDRs are not found. +func GetServiceCIDR(lister configlistersv1.NetworkLister, recorder events.Recorder) (string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveServiceClusterIPRangesFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return "", nil + } + if err != nil { + recorder.Warningf("ObserveServiceClusterIPRangesFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return "", err + } + + if len(network.Status.ServiceNetwork) == 0 || len(network.Status.ServiceNetwork[0]) == 0 { + recorder.Warningf("ObserveServiceClusterIPRangesFailed", "Required status.serviceNetwork field is not set in networks.%s/cluster", configv1.GroupName) + return "", fmt.Errorf("networks.%s/cluster: status.serviceNetwork not found", configv1.GroupName) + } + return network.Status.ServiceNetwork[0], nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 0000000000..4f189b7087 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go new file mode 100644 index 0000000000..83ea0e88d4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go @@ -0,0 +1,46 @@ +package eventstesting + +import ( + "fmt" + "testing" + + "github.com/openshift/library-go/pkg/operator/events" +) + +type TestingEventRecorder struct { + t *testing.T + component string +} + +// NewTestingEventRecorder provides event recorder that will log all recorded events to the error log. 
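Operators that need the full set of restricted CIDRs usually combine the two getters from this file. A small illustrative helper, not part of the vendored package; the function name is hypothetical:

package example

import (
	configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"

	"github.com/openshift/library-go/pkg/operator/configobserver/network"
	"github.com/openshift/library-go/pkg/operator/events"
)

// restrictedCIDRs is a hypothetical helper: the cluster networks followed by the
// service network, which is how observers commonly assemble restricted CIDR lists.
func restrictedCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) {
	clusterCIDRs, err := network.GetClusterCIDRs(lister, recorder)
	if err != nil {
		return nil, err
	}
	serviceCIDR, err := network.GetServiceCIDR(lister, recorder)
	if err != nil {
		return nil, err
	}
	cidrs := append([]string{}, clusterCIDRs...)
	if len(serviceCIDR) > 0 {
		cidrs = append(cidrs, serviceCIDR)
	}
	return cidrs, nil
}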
+func NewTestingEventRecorder(t *testing.T) events.Recorder { + return &TestingEventRecorder{t: t, component: "test"} +} + +func (r *TestingEventRecorder) ComponentName() string { + return r.component +} + +func (r *TestingEventRecorder) ForComponent(c string) events.Recorder { + return &TestingEventRecorder{t: r.t, component: c} +} + +func (r *TestingEventRecorder) WithComponentSuffix(suffix string) events.Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *TestingEventRecorder) Event(reason, message string) { + r.t.Logf("Event: %v: %v", reason, message) +} + +func (r *TestingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *TestingEventRecorder) Warning(reason, message string) { + r.t.Logf("Warning: %v: %v", reason, message) +} + +func (r *TestingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 0000000000..03bceede8f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,208 @@ +package events + +import ( + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows to fiddle the component name before sending the event to sink. + // Making more unique components will prevent the spam filter in upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it just suffix the current component name instead of overriding. + WithComponentSuffix(componentNameSuffix string) Recorder + + // ComponentName returns the current source component name for the event. + // This allows to suffix the original component name with 'sub-component'. + ComponentName() string +} + +// podNameEnv is a name of environment variable inside container that specifies the name of the current replica set. +// This replica set name is then used as a source/involved object for operator events. +const podNameEnv = "POD_NAME" + +// podNameEnvFunc allows to override the way we get the environment variable value (for unit tests). +var podNameEnvFunc = func() string { + return os.Getenv(podNameEnv) +} + +// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs. +// The pod name must be provided via the POD_NAME name. +// Even if this method returns an error, it always return valid reference to the namespace. It allows the callers to control the logging +// and decide to fail or accept the namespace. 
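To show how the reference returned here is typically consumed: a hypothetical helper that resolves the controller of the current pod and feeds it to NewRecorder (defined further down in this file). Only a sketch; the helper name is not part of the library:

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog"

	"github.com/openshift/library-go/pkg/operator/events"
)

// newOperatorEventRecorder is a hypothetical helper: resolve the controller owning
// the current pod (POD_NAME must be set) and use it as the involved object for events.
func newOperatorEventRecorder(kubeClient kubernetes.Interface, namespace, component string) events.Recorder {
	controllerRef, err := events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil)
	if err != nil {
		// As documented above, the returned reference is still usable on error (it
		// falls back to the namespace), so the sketch logs and keeps going.
		klog.Warningf("falling back to a namespace reference for events: %v", err)
	}
	return events.NewRecorder(kubeClient.CoreV1().Events(namespace), component, controllerRef)
}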
+func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) { + if reference == nil { + // Try to get the pod name via POD_NAME environment variable + reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace} + if len(reference.Name) != 0 { + return GetControllerReferenceForCurrentPod(client, targetNamespace, reference) + } + // If that fails, lets try to guess the pod by listing all pods in namespaces and using the first pod in the list + reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace)) + if err != nil { + // If this fails, do not give up with error but instead use the namespace as controller reference for the pod + // NOTE: This is last resort, if we see this often it might indicate something is wrong in the cluster. + // In some cases this might help with flakes. + return getControllerReferenceForNamespace(targetNamespace), err + } + return GetControllerReferenceForCurrentPod(client, targetNamespace, reference) + } + + switch reference.Kind { + case "Pod": + pod, err := client.CoreV1().Pods(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if podController := metav1.GetControllerOf(pod); podController != nil { + return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace)) + } + // This is a bare pod without any ownerReference + return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil + case "ReplicaSet": + rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if rsController := metav1.GetControllerOf(rs); rsController != nil { + return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace)) + } + // This is a replicaSet without any ownerReference + return reference, nil + default: + return reference, nil + } +} + +// getControllerReferenceForNamespace returns an object reference to the given namespace. +func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + +// makeObjectReference makes object reference from ownerReference and target namespace +func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: owner.Kind, + Namespace: targetNamespace, + Name: owner.Name, + UID: owner.UID, + APIVersion: owner.APIVersion, + } +} + +// guessControllerReferenceForNamespace tries to guess what resource to reference. 
+func guessControllerReferenceForNamespace(client corev1client.PodInterface) (*corev1.ObjectReference, error) { + pods, err := client.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv) + } + + pod := &pods.Items[0] + ownerRef := metav1.GetControllerOf(pod) + return &corev1.ObjectReference{ + Kind: ownerRef.Kind, + Namespace: pod.Namespace, + Name: ownerRef.Name, + UID: ownerRef.UID, + APIVersion: ownerRef.APIVersion, + }, nil +} + +// NewRecorder returns new event recorder. +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return &recorder{ + eventClient: client, + involvedObjectRef: involvedObjectRef, + sourceComponent: sourceComponentName, + } +} + +// recorder is an implementation of Recorder interface. +type recorder struct { + eventClient corev1client.EventInterface + involvedObjectRef *corev1.ObjectReference + sourceComponent string +} + +func (r *recorder) ComponentName() string { + return r.sourceComponent +} + +func (r *recorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + newRecorderForComponent.sourceComponent = componentName + return &newRecorderForComponent +} + +func (r *recorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Event emits the normal type event and allow formatting of message. +func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warning emits the warning type event and allow formatting of message. +func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *recorder) Event(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + if _, err := r.eventClient.Create(event); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +// Warning emits the warning type event. 
+func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + if _, err := r.eventClient.Create(event); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go new file mode 100644 index 0000000000..b64d9f6a98 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -0,0 +1,77 @@ +package events + +import ( + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog" +) + +type inMemoryEventRecorder struct { + events []*corev1.Event + source string + sync.Mutex +} + +// inMemoryDummyObjectReference is used for fake events. +var inMemoryDummyObjectReference = corev1.ObjectReference{ + Kind: "Pod", + Namespace: "dummy", + Name: "dummy", + APIVersion: "v1", +} + +type InMemoryRecorder interface { + Events() []*corev1.Event + Recorder +} + +// NewInMemoryRecorder provides event recorder that stores all events recorded in memory and allow to replay them using the Events() method. +// This recorder should be only used in unit tests. 
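The in-memory recorder is aimed at unit tests, where recorded events can be inspected directly. A hypothetical test sketch, not part of the vendored package:

package example

import (
	"testing"

	"github.com/openshift/library-go/pkg/operator/events"
)

// doSomethingThatWarns stands in for the code under test.
func doSomethingThatWarns(recorder events.Recorder) {
	recorder.Warning("SomethingFailed", "illustrative warning")
}

// TestEmitsWarning is a hypothetical unit test asserting on the captured events.
func TestEmitsWarning(t *testing.T) {
	recorder := events.NewInMemoryRecorder("test-component")

	doSomethingThatWarns(recorder)

	recorded := recorder.Events()
	if len(recorded) != 1 || recorded[0].Reason != "SomethingFailed" {
		t.Fatalf("unexpected events: %v", recorded)
	}
}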
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { + return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +} + +func (r *inMemoryEventRecorder) ComponentName() string { + return r.source +} + +func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { + r.Lock() + defer r.Unlock() + r.source = component + return r +} + +func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Events returns list of recorded events +func (r *inMemoryEventRecorder) Events() []*corev1.Event { + return r.events +} + +func (r *inMemoryEventRecorder) Event(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *inMemoryEventRecorder) Warning(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + klog.Info(event.String()) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go new file mode 100644 index 0000000000..7f3b5cd8bd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -0,0 +1,49 @@ +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog" +) + +type LoggingEventRecorder struct { + component string +} + +// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. 
+func NewLoggingEventRecorder(component string) Recorder { + return &LoggingEventRecorder{component: component} +} + +func (r *LoggingEventRecorder) ComponentName() string { + return r.component +} + +func (r *LoggingEventRecorder) ForComponent(component string) Recorder { + newRecorder := *r + newRecorder.component = component + return &newRecorder +} + +func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *LoggingEventRecorder) Event(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) + klog.Info(event.String()) +} + +func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *LoggingEventRecorder) Warning(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) + klog.Warning(event.String()) +} + +func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go new file mode 100644 index 0000000000..359d2eb81e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go @@ -0,0 +1,70 @@ +package events + +import ( + "fmt" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" +) + +// NewKubeRecorder returns new event recorder. +func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return (&upstreamRecorder{ + client: client, + component: sourceComponentName, + involvedObjectRef: involvedObjectRef, + }).ForComponent(sourceComponentName) +} + +// upstreamRecorder is an implementation of Recorder interface. +type upstreamRecorder struct { + client corev1client.EventInterface + component string + broadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + involvedObjectRef *corev1.ObjectReference +} + +func (r *upstreamRecorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(klog.Infof) + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client}) + + newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName}) + newRecorderForComponent.component = componentName + + return &newRecorderForComponent +} + +func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *upstreamRecorder) ComponentName() string { + return r.component +} + +// Eventf emits the normal type event and allow formatting of message. +func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allow formatting of message. 
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *upstreamRecorder) Event(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message) +} + +// Warning emits the warning type event. +func (r *upstreamRecorder) Warning(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go new file mode 100644 index 0000000000..35dc034e2c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go @@ -0,0 +1,118 @@ +package loglevel + +import ( + "fmt" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +type LogLevelController struct { + operatorClient operatorv1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// sets the klog level based on desired state +func NewClusterOperatorLoggingController( + operatorClient operatorv1helpers.OperatorClient, + recorder events.Recorder, +) *LogLevelController { + c := &LogLevelController{ + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("loglevel-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "LoggingSyncer"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. +func (c LogLevelController) sync() error { + detailedSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + currentLogLevel := CurrentLogLevel() + + // When the current loglevel is the desired one, do nothing + if currentLogLevel == detailedSpec.OperatorLogLevel { + return nil + } + + // Set the new loglevel if the operator spec changed + if err := SetVerbosityValue(detailedSpec.OperatorLogLevel); err != nil { + c.eventRecorder.Warningf("OperatorLoglevelChangeFailed", "Unable to change operator log level from %q to %q: %v", currentLogLevel, detailedSpec.OperatorLogLevel, err) + return err + } + + c.eventRecorder.Eventf("OperatorLoglevelChange", "Operator log level changed from %q to %q", currentLogLevel, detailedSpec.OperatorLogLevel) + return nil +} + +func (c *LogLevelController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting LogLevelController") + defer klog.Infof("Shutting down LogLevelController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *LogLevelController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *LogLevelController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and loglevel +func (c *LogLevelController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go new file mode 100644 index 0000000000..91e4251f06 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go @@ -0,0 +1,89 @@ +package loglevel + +import ( + "flag" + "fmt" + + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// LogLevelToKlog transforms operator log level to a klog numeric verbosity level. +func LogLevelToKlog(logLevel operatorv1.LogLevel) int { + switch logLevel { + case operatorv1.Normal: + return 2 + case operatorv1.Debug: + return 4 + case operatorv1.Trace: + return 6 + case operatorv1.TraceAll: + return 8 + default: + return 2 + } +} + +// CurrentLogLevel attempts to guess the current log level that is used by klog. +// We can use flags here as well, but this is less ugly ano more programmatically correct than flags. +func CurrentLogLevel() operatorv1.LogLevel { + switch { + case klog.V(8) == true: + return operatorv1.TraceAll + case klog.V(6) == true: + return operatorv1.Trace + case klog.V(4) == true: + return operatorv1.Debug + case klog.V(2) == true: + return operatorv1.Normal + default: + return operatorv1.Normal + } +} + +// SetVerbosityValue is a nasty hack and attempt to manipulate the global flags as klog does not expose +// a way to dynamically change the loglevel in runtime. +func SetVerbosityValue(logLevel operatorv1.LogLevel) error { + if logLevel == CurrentLogLevel() { + return nil + } + + var level *klog.Level + + // Convert operator loglevel to klog numeric string + desiredLevelValue := fmt.Sprintf("%d", LogLevelToKlog(logLevel)) + + // First, if the '-v' was specified in command line, attempt to acquire the level pointer from it. + if f := flag.CommandLine.Lookup("v"); f != nil { + if flagValue, ok := f.Value.(*klog.Level); ok { + level = flagValue + } + } + + // Second, if the '-v' was not set but is still present in flags defined for the command, attempt to acquire it + // by visiting all flags. + if level == nil { + flag.VisitAll(func(f *flag.Flag) { + if level != nil { + return + } + if levelFlag, ok := f.Value.(*klog.Level); ok { + level = levelFlag + } + }) + } + + if level != nil { + return level.Set(desiredLevelValue) + } + + // Third, if modifying the flag value (which is recommended by klog) fails, then fallback to modifying + // the internal state of klog using the empty new level. 
+ var newLevel klog.Level + if err := newLevel.Set(desiredLevelValue); err != nil { + return fmt.Errorf("failed set klog.logging.verbosity %s: %v", desiredLevelValue, err) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go new file mode 100644 index 0000000000..78acc00d5c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go @@ -0,0 +1,69 @@ +package management + +import ( + "github.com/openshift/api/operator/v1" +) + +var ( + allowOperatorUnmanagedState = true + allowOperatorRemovedState = true +) + +// These are for unit testing +var ( + getAllowedOperatorUnmanaged = func() bool { + return allowOperatorUnmanagedState + } + getAllowedOperatorRemovedState = func() bool { + return allowOperatorRemovedState + } +) + +// SetOperatorAlwaysManaged is one time choice when an operator want to opt-out from supporting the "unmanaged" state. +// This is a case of control plane operators or operators that are required to always run otherwise the cluster will +// get into unstable state or critical components will stop working. +func SetOperatorAlwaysManaged() { + allowOperatorUnmanagedState = false +} + +// SetOperatorNotRemovable is one time choice the operator author can make to indicate the operator does not support +// removing of his operand. This makes sense for operators like kube-apiserver where removing operand will lead to a +// bricked, non-automatically recoverable state. +func SetOperatorNotRemovable() { + allowOperatorRemovedState = false +} + +// IsOperatorAlwaysManaged means the operator can't be set to unmanaged state. +func IsOperatorAlwaysManaged() bool { + return !getAllowedOperatorUnmanaged() +} + +// IsOperatorNotRemovable means the operator can't bet set to removed state. +func IsOperatorNotRemovable() bool { + return !getAllowedOperatorRemovedState() +} + +func IsOperatorUnknownState(state v1.ManagementState) bool { + switch state { + case v1.Managed, v1.Removed, v1.Unmanaged: + return false + default: + return true + } +} + +// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand. 
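Taken together, the helpers in this file let an operator declare its supported management states once at startup and then gate its sync loop on them. A sketch of that pattern, not part of this patch; the function names are illustrative:

package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"

	"github.com/openshift/library-go/pkg/operator/management"
)

func init() {
	// Hypothetical choice for an operator that cannot tolerate being unmanaged or
	// removed: declare it once, before any controllers start.
	management.SetOperatorAlwaysManaged()
	management.SetOperatorNotRemovable()
}

// syncOperand is a hypothetical sync-loop fragment: do nothing unless the current
// managementState actually allows the operator to manage its operand.
func syncOperand(state operatorv1.ManagementState) error {
	if !management.IsOperatorManaged(state) {
		return nil
	}
	// ... reconcile the operand here ...
	return nil
}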
+func IsOperatorManaged(state v1.ManagementState) bool { + if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() { + return true + } + switch state { + case v1.Managed: + return true + case v1.Removed: + return false + case v1.Unmanaged: + return false + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go new file mode 100644 index 0000000000..71e4f4cf7a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go @@ -0,0 +1,142 @@ +package management + +import ( + "fmt" + "time" + + "k8s.io/klog" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +// ManagementStateController watches changes of `managementState` field and react in case that field is set to an unsupported value. +// As each operator can opt-out from supporting `unmanaged` or `removed` states, this controller will add failing condition when the +// value for this field is set to this values for those operators. +type ManagementStateController struct { + operatorName string + operatorClient operatorv1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +func NewOperatorManagementStateController( + name string, + operatorClient operatorv1helpers.OperatorClient, + recorder events.Recorder, +) *ManagementStateController { + c := &ManagementStateController{ + operatorName: name, + operatorClient: operatorClient, + eventRecorder: recorder, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ManagementStateController-"+name), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +func (c ManagementStateController) sync() error { + detailedSpec, _, _, err := c.operatorClient.GetOperatorState() + if apierrors.IsNotFound(err) { + c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for %s", c.operatorName) + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: "ManagementStateDegraded", + Status: operatorv1.ConditionFalse, + } + + if IsOperatorAlwaysManaged() && detailedSpec.ManagementState == operatorv1.Unmanaged { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Unmanaged" + cond.Message = fmt.Sprintf("Unmanaged is not supported for %s operator", c.operatorName) + } + + if IsOperatorNotRemovable() && detailedSpec.ManagementState == operatorv1.Removed { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Removed" + cond.Message = fmt.Sprintf("Removed is not supported for %s operator", c.operatorName) + } + + if IsOperatorUnknownState(detailedSpec.ManagementState) { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Unknown" + cond.Message = fmt.Sprintf("Unsupported management state %q for %s operator", detailedSpec.ManagementState, c.operatorName) + } + + if _, _, 
updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return nil +} + +func (c *ManagementStateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting management-state-controller-" + c.operatorName) + defer klog.Infof("Shutting down management-state-controller-" + c.operatorName) + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *ManagementStateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ManagementStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ManagementStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go new file mode 100644 index 0000000000..ef3ea461a4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go @@ -0,0 +1,42 @@ +package options + +// ManifestConfig is a struct of values to be used in manifest templates. +type ManifestConfig struct { + // ConfigHostPath is a host path mounted into the controller manager pods to hold the config file. + ConfigHostPath string + + // ConfigFileName is the filename of config file inside ConfigHostPath. + ConfigFileName string + + // CloudProviderHostPath is a host path mounted into the apiserver pods to hold cloud provider configuration. + CloudProviderHostPath string + + // SecretsHostPath holds certs and keys + SecretsHostPath string + + // Namespace is the target namespace for the bootstrap controller manager to be created. + Namespace string + + // Image is the pull spec of the image to use for the controller manager. + Image string + + // ImagePullPolicy specifies the image pull policy to use for the images. + ImagePullPolicy string +} + +// FileConfig +type FileConfig struct { + // BootstrapConfig holds the rendered control plane component config file for bootstrapping (phase 1). + BootstrapConfig []byte + + // PostBootstrapConfig holds the rendered control plane component config file after bootstrapping (phase 2). + PostBootstrapConfig []byte + + // Assets holds the loaded assets like certs and keys. 
+ Assets map[string][]byte +} + +type TemplateData struct { + ManifestConfig + FileConfig +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go new file mode 100644 index 0000000000..d025cd6461 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go @@ -0,0 +1,151 @@ +package options + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "text/template" + + "github.com/ghodss/yaml" + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GenericOptions contains the generic render command options. +type GenericOptions struct { + DefaultFile string + BootstrapOverrideFile string + PostBootstrapOverrideFile string + AdditionalConfigOverrideFiles []string + + ConfigOutputFile string + + TemplatesDir string + AssetInputDir string + AssetOutputDir string +} + +type Template struct { + FileName string + Content []byte +} + +// NewGenericOptions returns a default set of generic options. +func NewGenericOptions() *GenericOptions { + return &GenericOptions{ + TemplatesDir: "/usr/share/bootkube/manifests", + } +} + +// AddFlags adds the generic flags to the flagset. +func (o *GenericOptions) AddFlags(fs *pflag.FlagSet, configGVK schema.GroupVersionKind) { + fs.StringVar(&o.AssetOutputDir, "asset-output-dir", o.AssetOutputDir, "Output path for rendered manifests.") + fs.StringVar(&o.AssetInputDir, "asset-input-dir", o.AssetInputDir, "A path to directory with certificates and secrets.") + fs.StringVar(&o.TemplatesDir, "templates-input-dir", o.TemplatesDir, "A path to a directory with manifest templates.") + fs.StringSliceVar(&o.AdditionalConfigOverrideFiles, "config-override-files", o.AdditionalConfigOverrideFiles, + fmt.Sprintf("Additional sparse %s files for customiziation through the installer, merged into the default config in the given order.", gvkOutput{configGVK})) + fs.StringVar(&o.ConfigOutputFile, "config-output-file", o.ConfigOutputFile, fmt.Sprintf("Output path for the %s yaml file.", gvkOutput{configGVK})) +} + +type gvkOutput struct { + schema.GroupVersionKind +} + +func (gvk gvkOutput) String() string { + return fmt.Sprintf("%s.%s/%s", gvk.GroupVersionKind.Kind, gvk.GroupVersionKind.Group, gvk.GroupVersionKind.Version) +} + +// Complete fills in missing values before execution. +func (o *GenericOptions) Complete() error { + return nil +} + +// Validate verifies the inputs. +func (o *GenericOptions) Validate() error { + if len(o.AssetInputDir) == 0 { + return errors.New("missing required flag: --asset-input-dir") + } + if len(o.AssetOutputDir) == 0 { + return errors.New("missing required flag: --asset-output-dir") + } + if len(o.TemplatesDir) == 0 { + return errors.New("missing required flag: --templates-dir") + } + if len(o.ConfigOutputFile) == 0 { + return errors.New("missing required flag: --config-output-file") + } + + return nil +} + +// ApplyTo applies the options ot the given config struct using the provides text/template data. 
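The generic options are meant to back a render subcommand: register the flags for the component's config GroupVersionKind, parse, then Complete and Validate. A hypothetical setup sketch, not part of the vendored package; the GVK and function name are illustrative:

package example

import (
	"github.com/spf13/pflag"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/openshift/library-go/pkg/operator/render/options"
)

// newRenderOptions is a hypothetical render-subcommand setup: register the generic
// flags for an illustrative config GroupVersionKind, parse, then complete/validate.
func newRenderOptions(args []string) (*options.GenericOptions, error) {
	opts := options.NewGenericOptions()
	fs := pflag.NewFlagSet("render", pflag.ContinueOnError)
	opts.AddFlags(fs, schema.GroupVersionKind{
		Group:   "example.openshift.io", // illustrative GVK, not a real config type
		Version: "v1",
		Kind:    "ExampleConfig",
	})
	if err := fs.Parse(args); err != nil {
		return nil, err
	}
	if err := opts.Complete(); err != nil {
		return nil, err
	}
	if err := opts.Validate(); err != nil {
		return nil, err
	}
	return opts, nil
}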
+func (o *GenericOptions) ApplyTo(cfg *FileConfig, defaultConfig, bootstrapOverrides, postBootstrapOverrides Template, templateData interface{}, specialCases map[string]resourcemerge.MergeFunc) error { + var err error + + cfg.BootstrapConfig, err = o.configFromDefaultsPlusOverride(defaultConfig, bootstrapOverrides, templateData, specialCases) + if err != nil { + return fmt.Errorf("failed to generate bootstrap config (phase 1): %v", err) + } + + if cfg.PostBootstrapConfig, err = o.configFromDefaultsPlusOverride(defaultConfig, postBootstrapOverrides, templateData, specialCases); err != nil { + return fmt.Errorf("failed to generate post-bootstrap config (phase 2): %v", err) + } + + // load and render templates + if cfg.Assets, err = assets.LoadFilesRecursively(o.AssetInputDir); err != nil { + return fmt.Errorf("failed loading assets from %q: %v", o.AssetInputDir, err) + } + + return nil +} + +func (o *GenericOptions) configFromDefaultsPlusOverride(defaultConfig, overrides Template, templateData interface{}, specialCases map[string]resourcemerge.MergeFunc) ([]byte, error) { + defaultConfigContent, err := renderTemplate(defaultConfig, templateData) + if err != nil { + return nil, fmt.Errorf("failed to render default config file %q as text/template: %v", defaultConfig.FileName, err) + } + + overridesContent, err := renderTemplate(overrides, templateData) + if err != nil { + return nil, fmt.Errorf("failed to render config override file %q as text/template: %v", overrides.FileName, err) + } + configs := [][]byte{defaultConfigContent, overridesContent} + for _, fname := range o.AdditionalConfigOverrideFiles { + bs, err := ioutil.ReadFile(fname) + if err != nil { + return nil, fmt.Errorf("failed to load config overrides at %q: %v", fname, err) + } + overrides, err := renderTemplate(Template{fname, bs}, templateData) + if err != nil { + return nil, fmt.Errorf("failed to render config overrides file %q as text/template: %v", fname, err) + } + + configs = append(configs, overrides) + } + mergedConfig, err := resourcemerge.MergeProcessConfig(specialCases, configs...) + if err != nil { + return nil, fmt.Errorf("failed to merge configs: %v", err) + } + yml, err := yaml.JSONToYAML(mergedConfig) + if err != nil { + return nil, err + } + + return yml, nil +} + +func renderTemplate(tpl Template, data interface{}) ([]byte, error) { + tmpl, err := template.New(tpl.FileName).Parse(string(tpl.Content)) + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go new file mode 100644 index 0000000000..e893edfafe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go @@ -0,0 +1,95 @@ +package options + +import ( + "errors" + "fmt" + + "github.com/spf13/pflag" +) + +// ManifestOptions contains the values that influence manifest contents. +type ManifestOptions struct { + Namespace string + Image string + ImagePullPolicy string + ConfigHostPath string + ConfigFileName string + CloudProviderHostPath string + SecretsHostPath string +} + +// NewManifestOptions return default values for ManifestOptions. 
+func NewManifestOptions(componentName, image string) *ManifestOptions {
+	return &ManifestOptions{
+		Namespace:             fmt.Sprintf("openshift-%s", componentName),
+		Image:                 image,
+		ImagePullPolicy:       "IfNotPresent",
+		ConfigHostPath:        "/etc/kubernetes/bootstrap-configs",
+		ConfigFileName:        fmt.Sprintf("%s-config.yaml", componentName),
+		CloudProviderHostPath: "/etc/kubernetes/cloud",
+		SecretsHostPath:       "/etc/kubernetes/bootstrap-secrets",
+	}
+}
+
+// AddFlags adds the manifest-related flags to the flagset.
+func (o *ManifestOptions) AddFlags(fs *pflag.FlagSet, humanReadableComponentName string) {
+	fs.StringVar(&o.Namespace, "manifest-namespace", o.Namespace,
+		fmt.Sprintf("Target namespace for phase 3 %s pods.", humanReadableComponentName))
+	fs.StringVar(&o.Image, "manifest-image", o.Image,
+		fmt.Sprintf("Image to use for the %s.", humanReadableComponentName))
+	fs.StringVar(&o.ImagePullPolicy, "manifest-image-pull-policy", o.ImagePullPolicy,
+		fmt.Sprintf("Image pull policy to use for the %s.", humanReadableComponentName))
+	fs.StringVar(&o.ConfigHostPath, "manifest-config-host-path", o.ConfigHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold a config file.", humanReadableComponentName))
+	fs.StringVar(&o.SecretsHostPath, "manifest-secrets-host-path", o.SecretsHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold secrets.", humanReadableComponentName))
+	fs.StringVar(&o.ConfigFileName, "manifest-config-file-name", o.ConfigFileName,
+		"The config file name inside the manifest-config-host-path.")
+	fs.StringVar(&o.CloudProviderHostPath, "manifest-cloud-provider-host-path", o.CloudProviderHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold cloud provider configuration.", humanReadableComponentName))
+}
+
+// Complete fills in missing values before execution.
+func (o *ManifestOptions) Complete() error {
+	return nil
+}
+
+// Validate verifies the inputs.
+func (o *ManifestOptions) Validate() error {
+	if len(o.Namespace) == 0 {
+		return errors.New("missing required flag: --manifest-namespace")
+	}
+	if len(o.Image) == 0 {
+		return errors.New("missing required flag: --manifest-image")
+	}
+	if len(o.ImagePullPolicy) == 0 {
+		return errors.New("missing required flag: --manifest-image-pull-policy")
+	}
+	if len(o.ConfigHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-config-host-path")
+	}
+	if len(o.ConfigFileName) == 0 {
+		return errors.New("missing required flag: --manifest-config-file-name")
+	}
+	if len(o.CloudProviderHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-cloud-provider-host-path")
+	}
+	if len(o.SecretsHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-secrets-host-path")
+	}
+
+	return nil
+}
+
+// ApplyTo applies the options to the given config struct.
+func (o *ManifestOptions) ApplyTo(cfg *ManifestConfig) error { + cfg.Namespace = o.Namespace + cfg.Image = o.Image + cfg.ImagePullPolicy = o.ImagePullPolicy + cfg.ConfigHostPath = o.ConfigHostPath + cfg.ConfigFileName = o.ConfigFileName + cfg.CloudProviderHostPath = o.CloudProviderHostPath + cfg.SecretsHostPath = o.SecretsHostPath + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/render.go b/vendor/github.com/openshift/library-go/pkg/operator/render/render.go new file mode 100644 index 0000000000..3bbd7a03a4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/render.go @@ -0,0 +1,31 @@ +package render + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/render/options" +) + +// WriteFiles writes the manifests and the bootstrap config file. +func WriteFiles(opt *options.GenericOptions, fileConfig *options.FileConfig, templateData interface{}, additionalPredicates ...assets.FileInfoPredicate) error { + // write assets + for _, manifestDir := range []string{"bootstrap-manifests", "manifests"} { + manifests, err := assets.New(filepath.Join(opt.TemplatesDir, manifestDir), templateData, append(additionalPredicates, assets.OnlyYaml)...) + if err != nil { + return fmt.Errorf("failed rendering assets: %v", err) + } + if err := manifests.WriteFiles(filepath.Join(opt.AssetOutputDir, manifestDir)); err != nil { + return fmt.Errorf("failed writing assets to %q: %v", filepath.Join(opt.AssetOutputDir, manifestDir), err) + } + } + + // create bootstrap configuration + if err := ioutil.WriteFile(opt.ConfigOutputFile, fileConfig.BootstrapConfig, 0644); err != nil { + return fmt.Errorf("failed to write merged config to %q: %v", opt.ConfigOutputFile, err) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go new file mode 100644 index 0000000000..c75be2bf05 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go @@ -0,0 +1,42 @@ +package resourceapply + +import ( + "k8s.io/klog" + + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyCustomResourceDefinition applies the required CustomResourceDefinition to the cluster. 
+func ApplyCustomResourceDefinition(client apiextclientv1beta1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureCustomResourceDefinition(modified, existingCopy, *required) + if !*modified { + return existing, false, nil + } + + if klog.V(4) { + klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatch(existing, existingCopy)) + } + + actual, err := client.CustomResourceDefinitions().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go new file mode 100644 index 0000000000..81a81a2d01 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go @@ -0,0 +1,45 @@ +package resourceapply + +import ( + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyAPIService merges objectmeta and requires apiservice coordinates. It does not touch CA bundles, which should be managed via service CA controller. 
+func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) {
+	existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.APIServices().Create(required)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	serviceSame := equality.Semantic.DeepEqual(existingCopy.Spec.Service, required.Spec.Service)
+	prioritySame := existingCopy.Spec.VersionPriority == required.Spec.VersionPriority && existingCopy.Spec.GroupPriorityMinimum == required.Spec.GroupPriorityMinimum
+	insecureSame := existingCopy.Spec.InsecureSkipTLSVerify == required.Spec.InsecureSkipTLSVerify
+	// there was no change to metadata, the service and priorities were right
+	if !*modified && serviceSame && prioritySame && insecureSame {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Spec = required.Spec
+
+	if klog.V(4) {
+		klog.Infof("APIService %q changes: %s", existing.Name, JSONPatch(existing, existingCopy))
+	}
+	actual, err := client.APIServices().Update(existingCopy)
+	return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go
new file mode 100644
index 0000000000..c21307ec4f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go
@@ -0,0 +1,114 @@
+package resourceapply
+
+import (
+	"k8s.io/klog"
+
+	appsv1 "k8s.io/api/apps/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// ApplyDeployment merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+func ApplyDeployment(client appsclientv1.DeploymentsGetter, recorder events.Recorder, required *appsv1.Deployment, expectedGeneration int64,
+	forceRollout bool) (*appsv1.Deployment, bool, error) {
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	existing, err := client.Deployments(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Deployments(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force the deployment
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write. 
We're just trying to get the object correct
+	toWrite := existingCopy // shallow copy so the code reads easier
+	toWrite.Spec = *required.Spec.DeepCopy()
+	if forceRollout {
+		// forces a deployment
+		forceString := string(uuid.NewUUID())
+		if toWrite.Annotations == nil {
+			toWrite.Annotations = map[string]string{}
+		}
+		if toWrite.Spec.Template.Annotations == nil {
+			toWrite.Spec.Template.Annotations = map[string]string{}
+		}
+		toWrite.Annotations["operator.openshift.io/force"] = forceString
+		toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+	}
+
+	if klog.V(4) {
+		klog.Infof("Deployment %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, toWrite))
+	}
+
+	actual, err := client.Deployments(required.Namespace).Update(toWrite)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyDaemonSet merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error.
+func ApplyDaemonSet(client appsclientv1.DaemonSetsGetter, recorder events.Recorder, required *appsv1.DaemonSet, expectedGeneration int64, forceRollout bool) (*appsv1.DaemonSet, bool, error) {
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	existing, err := client.DaemonSets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.DaemonSets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force the deployment
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write. 
We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Spec = *required.Spec.DeepCopy() + if forceRollout { + // forces a deployment + forceString := string(uuid.NewUUID()) + if toWrite.Annotations == nil { + toWrite.Annotations = map[string]string{} + } + if toWrite.Spec.Template.Annotations == nil { + toWrite.Spec.Template.Annotations = map[string]string{} + } + toWrite.Annotations["operator.openshift.io/force"] = forceString + toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString + } + + if klog.V(4) { + klog.Infof("DaemonSet %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, toWrite)) + } + actual, err := client.DaemonSets(required.Namespace).Update(toWrite) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go new file mode 100644 index 0000000000..870b7ceb64 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -0,0 +1,280 @@ +package resourceapply + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyNamespace merges objectmeta, does not worry about anything else +func ApplyNamespace(client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) { + existing, err := client.Namespaces().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Namespaces().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4) { + klog.Infof("Namespace %q changes: %v", required.Name, JSONPatch(existing, existingCopy)) + } + + actual, err := client.Namespaces().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyService merges objectmeta and requires +// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update +// TODO I've special cased the selector for now +func ApplyService(client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) { + existing, err := client.Services(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Services(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + selectorSame := 
equality.Semantic.DeepEqual(existingCopy.Spec.Selector, required.Spec.Selector) + + typeSame := false + requiredIsEmpty := len(required.Spec.Type) == 0 + existingCopyIsCluster := existingCopy.Spec.Type == corev1.ServiceTypeClusterIP + if (requiredIsEmpty && existingCopyIsCluster) || equality.Semantic.DeepEqual(existingCopy.Spec.Type, required.Spec.Type) { + typeSame = true + } + + if selectorSame && typeSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Spec.Selector = required.Spec.Selector + existingCopy.Spec.Type = required.Spec.Type // if this is different, the update will fail. Status will indicate it. + + if klog.V(4) { + klog.Infof("Service %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required)) + } + + actual, err := client.Services(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyPod merges objectmeta, does not worry about anything else +func ApplyPod(client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) { + existing, err := client.Pods(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Pods(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4) { + klog.Infof("Pod %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required)) + } + + actual, err := client.Pods(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyServiceAccount merges objectmeta, does not worry about anything else +func ApplyServiceAccount(client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + existing, err := client.ServiceAccounts(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ServiceAccounts(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + if klog.V(4) { + klog.Infof("ServiceAccount %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required)) + } + actual, err := client.ServiceAccounts(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyConfigMap merges objectmeta, requires data +func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + existing, err := client.ConfigMaps(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ConfigMaps(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, 
false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	var modifiedKeys []string
+	for existingCopyKey, existingCopyValue := range existingCopy.Data {
+		if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+			modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+		}
+	}
+	for requiredKey := range required.Data {
+		if _, ok := existingCopy.Data[requiredKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+		}
+	}
+
+	dataSame := len(modifiedKeys) == 0
+	if dataSame && !*modified {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+
+	actual, err := client.ConfigMaps(required.Namespace).Update(existingCopy)
+
+	var details string
+	if !dataSame {
+		sort.Sort(sort.StringSlice(modifiedKeys))
+		details = fmt.Sprintf("caused by changes in %v", strings.Join(modifiedKeys, ","))
+	}
+	if klog.V(4) {
+		klog.Infof("ConfigMap %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required))
+	}
+	reportUpdateEvent(recorder, required, err, details)
+	return actual, true, err
+}
+
+// ApplySecret merges objectmeta, requires data
+func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) {
+	existing, err := client.Secrets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Secrets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	dataSame := equality.Semantic.DeepEqual(existingCopy.Data, required.Data)
+	if dataSame && !*modified {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+
+	if klog.V(4) {
+		klog.Infof("Secret %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required))
+	}
+	actual, err := client.Secrets(required.Namespace).Update(existingCopy)
+
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+func SyncConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+	source, err := client.ConfigMaps(sourceNamespace).Get(sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		deleteErr := client.ConfigMaps(targetNamespace).Delete(targetName, nil)
+		if _, getErr := client.ConfigMaps(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) {
+			return nil, true, nil
+		}
+		if apierrors.IsNotFound(deleteErr) {
+			return nil, false, nil
+		}
+		if deleteErr == nil {
+			recorder.Eventf("TargetConfigDeleted", "Deleted target configmap %s/%s because source config does not exist", targetNamespace, targetName)
+			return nil, true, nil
+		}
+		return nil, false, deleteErr
+	case err != nil:
+		return nil, false, err
+	default:
+		source.Namespace = targetNamespace
+		source.Name = targetName
+		source.ResourceVersion = ""
+		source.OwnerReferences = ownerRefs
+		return ApplyConfigMap(client, recorder, source)
+	}
+}
+
+func SyncSecret(client 
coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) {
+	source, err := client.Secrets(sourceNamespace).Get(sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		if _, getErr := client.Secrets(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) {
+			return nil, true, nil
+		}
+		deleteErr := client.Secrets(targetNamespace).Delete(targetName, nil)
+		if apierrors.IsNotFound(deleteErr) {
+			return nil, false, nil
+		}
+		if deleteErr == nil {
+			recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName)
+			return nil, true, nil
+		}
+		return nil, false, deleteErr
+	case err != nil:
+		return nil, false, err
+	default:
+		source.Namespace = targetNamespace
+		source.Name = targetName
+		source.ResourceVersion = ""
+		source.OwnerReferences = ownerRefs
+		return ApplySecret(client, recorder, source)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
new file mode 100644
index 0000000000..55142ad2b6
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
@@ -0,0 +1,86 @@
+package resourceapply
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubescheme "k8s.io/client-go/kubernetes/scheme"
+
+	openshiftapi "github.com/openshift/api"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+var (
+	openshiftScheme = runtime.NewScheme()
+)
+
+func init() {
+	if err := openshiftapi.Install(openshiftScheme); err != nil {
+		panic(err)
+	}
+}
+
+// guessObjectGroupKind returns the API group and kind for the passed runtime object.
+func guessObjectGroupKind(object runtime.Object) (string, string) {
+	if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 {
+		return gvk.Group, gvk.Kind
+	}
+	if kinds, _, _ := kubescheme.Scheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0].Group, kinds[0].Kind
+	}
+	if kinds, _, _ := openshiftScheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0].Group, kinds[0].Kind
+	}
+	return "unknown", "Object"
+
+}
+
+func reportCreateEvent(recorder events.Recorder, obj runtime.Object, originalErr error) {
+	reportingGroup, reportingKind := guessObjectGroupKind(obj)
+	if len(reportingGroup) != 0 {
+		reportingGroup = "." + reportingGroup
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		klog.Errorf("Failed to get accessor for %+v", obj)
+		return
+	}
+	namespace := ""
+	if len(accessor.GetNamespace()) > 0 {
+		namespace = " -n " + accessor.GetNamespace()
+	}
+	if originalErr == nil {
+		recorder.Eventf(fmt.Sprintf("%sCreated", reportingKind), "Created %s%s/%s%s because it was missing", reportingKind, reportingGroup, accessor.GetName(), namespace)
+		return
+	}
+	recorder.Warningf(fmt.Sprintf("%sCreateFailed", reportingKind), "Failed to create %s%s/%s%s: %v", reportingKind, reportingGroup, accessor.GetName(), namespace, originalErr)
+}
+
+func reportUpdateEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) {
+	reportingGroup, reportingKind := guessObjectGroupKind(obj)
+	if len(reportingGroup) != 0 {
+		reportingGroup = "." 
+ reportingGroup + } + accessor, err := meta.Accessor(obj) + if err != nil { + klog.Errorf("Failed to get accessor for %+v", obj) + return + } + namespace := "" + if len(accessor.GetNamespace()) > 0 { + namespace = " -n " + accessor.GetNamespace() + } + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sUpdateFailed", reportingKind), "Failed to update %s%s/%s%s: %v", reportingKind, reportingGroup, accessor.GetName(), namespace, originalErr) + case len(details) == 0: + recorder.Eventf(fmt.Sprintf("%sUpdated", reportingKind), "Updated %s%s/%s%s because it changed", reportingKind, reportingGroup, accessor.GetName(), namespace) + default: + recorder.Eventf(fmt.Sprintf("%sUpdated", reportingKind), "Updated %s%s/%s%s: %s", reportingKind, reportingGroup, accessor.GetName(), namespace, strings.Join(details, "\n")) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go new file mode 100644 index 0000000000..4e91c2bb5e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go @@ -0,0 +1,87 @@ +package resourceapply + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + + "github.com/openshift/api" + "github.com/openshift/library-go/pkg/operator/events" +) + +var ( + genericScheme = runtime.NewScheme() + genericCodecs = serializer.NewCodecFactory(genericScheme) + genericCodec = genericCodecs.UniversalDeserializer() +) + +func init() { + utilruntime.Must(api.InstallKube(genericScheme)) +} + +type AssetFunc func(name string) ([]byte, error) + +type ApplyResult struct { + File string + Type string + Result runtime.Object + Changed bool + Error error +} + +// ApplyDirectly applies the given manifest files to API server. +func ApplyDirectly(kubeClient kubernetes.Interface, recorder events.Recorder, manifests AssetFunc, files ...string) []ApplyResult { + ret := []ApplyResult{} + + for _, file := range files { + result := ApplyResult{File: file} + objBytes, err := manifests(file) + if err != nil { + result.Error = fmt.Errorf("missing %q: %v", file, err) + ret = append(ret, result) + continue + } + requiredObj, _, err := genericCodec.Decode(objBytes, nil, nil) + if err != nil { + result.Error = fmt.Errorf("cannot decode %q: %v", file, err) + ret = append(ret, result) + continue + } + result.Type = fmt.Sprintf("%T", requiredObj) + + // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems. 
+ switch t := requiredObj.(type) { + case *corev1.Namespace: + result.Result, result.Changed, result.Error = ApplyNamespace(kubeClient.CoreV1(), recorder, t) + case *corev1.Service: + result.Result, result.Changed, result.Error = ApplyService(kubeClient.CoreV1(), recorder, t) + case *corev1.Pod: + result.Result, result.Changed, result.Error = ApplyPod(kubeClient.CoreV1(), recorder, t) + case *corev1.ServiceAccount: + result.Result, result.Changed, result.Error = ApplyServiceAccount(kubeClient.CoreV1(), recorder, t) + case *corev1.ConfigMap: + result.Result, result.Changed, result.Error = ApplyConfigMap(kubeClient.CoreV1(), recorder, t) + case *corev1.Secret: + result.Result, result.Changed, result.Error = ApplySecret(kubeClient.CoreV1(), recorder, t) + case *rbacv1.ClusterRole: + result.Result, result.Changed, result.Error = ApplyClusterRole(kubeClient.RbacV1(), recorder, t) + case *rbacv1.ClusterRoleBinding: + result.Result, result.Changed, result.Error = ApplyClusterRoleBinding(kubeClient.RbacV1(), recorder, t) + case *rbacv1.Role: + result.Result, result.Changed, result.Error = ApplyRole(kubeClient.RbacV1(), recorder, t) + case *rbacv1.RoleBinding: + result.Result, result.Changed, result.Error = ApplyRoleBinding(kubeClient.RbacV1(), recorder, t) + default: + result.Error = fmt.Errorf("unhandled type %T", requiredObj) + } + + ret = append(ret, result) + } + + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go new file mode 100644 index 0000000000..c5077f48e8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go @@ -0,0 +1,33 @@ +package resourceapply + +import ( + "fmt" + + patch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// JSONPatch generates a JSON patch between original and modified objects and return the JSON as a string. +// In case of error, the returned string will contain the error messages. 
+func JSONPatch(original, modified runtime.Object) string { + if original == nil { + return "original object is nil" + } + if modified == nil { + return "modified object is nil" + } + originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, original) + if err != nil { + return fmt.Sprintf("unable to decode original to JSON: %v", err) + } + modifiedJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, modified) + if err != nil { + return fmt.Sprintf("unable to decode modified to JSON: %v", err) + } + patchBytes, err := patch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return fmt.Sprintf("unable to create JSON patch: %v", err) + } + return string(patchBytes) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go new file mode 100644 index 0000000000..efc4ddb5da --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go @@ -0,0 +1,101 @@ +package resourceapply + +import ( + "fmt" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + "github.com/openshift/library-go/pkg/operator/events" +) + +var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"} + +func ensureServiceMonitorSpec(required, existing *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + requiredSpec, _, err := unstructured.NestedMap(required.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + + if err := mergo.Merge(&existingSpec, &requiredSpec); err != nil { + return nil, false, err + } + + if equality.Semantic.DeepEqual(existingSpec, requiredSpec) { + return existing, false, nil + } + + existingCopy := existing.DeepCopy() + if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), existingSpec, "spec"); err != nil { + return nil, true, err + } + + return existingCopy, true, nil +} + +// ApplyServiceMonitor applies the Prometheus service monitor. 
+func ApplyServiceMonitor(client dynamic.Interface, recorder events.Recorder, serviceMonitorBytes []byte) (bool, error) { + monitorJSON, err := yaml.YAMLToJSON(serviceMonitorBytes) + if err != nil { + return false, err + } + + monitorObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, monitorJSON) + if err != nil { + return false, err + } + + required, ok := monitorObj.(*unstructured.Unstructured) + if !ok { + return false, fmt.Errorf("unexpected object in %t", monitorObj) + } + + namespace := required.GetNamespace() + + existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(required.GetName(), metav1.GetOptions{}) + if errors.IsNotFound(err) { + _, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(required, metav1.CreateOptions{}) + if createErr != nil { + recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr) + return true, createErr + } + recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing") + return true, nil + } + + existingCopy := existing.DeepCopy() + + updated, endpointsModified, err := ensureServiceMonitorSpec(required, existingCopy) + if err != nil { + return false, err + } + + if !endpointsModified { + return false, nil + } + + if klog.V(4) { + klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatch(existing, existingCopy)) + } + + if _, err = client.Resource(serviceMonitorGVR).Namespace(namespace).Update(updated, metav1.UpdateOptions{}); err != nil { + recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err) + return true, err + } + + recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed") + return true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go new file mode 100644 index 0000000000..43f4130249 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go @@ -0,0 +1,190 @@ +package resourceapply + +import ( + "fmt" + + "k8s.io/klog" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyClusterRole merges objectmeta, requires rules, aggregation rules are not allowed for now. 
+func ApplyClusterRole(client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) { + if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 { + return nil, false, fmt.Errorf("cannot create an aggregated cluster role") + } + + existing, err := client.ClusterRoles().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ClusterRoles().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Rules = required.Rules + existingCopy.AggregationRule = nil + + if klog.V(4) { + klog.Infof("ClusterRole %q changes: %v", required.Name, JSONPatch(existing, existingCopy)) + } + + actual, err := client.ClusterRoles().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs +// TODO on non-matching roleref, delete and recreate +func ApplyClusterRoleBinding(client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) { + existing, err := client.ClusterRoleBindings().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ClusterRoleBindings().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + requiredCopy := required.DeepCopy() + + // Enforce apiGroup fields in roleRefs + existingCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range existingCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + existingCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + requiredCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range requiredCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta) + + subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects) + roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef) + + if subjectsAreSame && roleRefIsSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Subjects = requiredCopy.Subjects + existingCopy.RoleRef = requiredCopy.RoleRef + + if klog.V(4) { + klog.Infof("ClusterRoleBinding %q changes: %v", requiredCopy.Name, JSONPatch(existing, existingCopy)) + } + + actual, err := client.ClusterRoleBindings().Update(existingCopy) + reportUpdateEvent(recorder, requiredCopy, err) + return actual, true, err +} + +// ApplyRole merges objectmeta, requires rules +func ApplyRole(client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) { + existing, err := client.Roles(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := 
client.Roles(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Rules = required.Rules + + if klog.V(4) { + klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, existingCopy)) + } + actual, err := client.Roles(required.Namespace).Update(existing) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyRoleBinding merges objectmeta, requires subjects and role refs +// TODO on non-matching roleref, delete and recreate +func ApplyRoleBinding(client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) { + existing, err := client.RoleBindings(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.RoleBindings(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + requiredCopy := required.DeepCopy() + + // Enforce apiGroup fields in roleRefs and subjects + existingCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range existingCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + existingCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + requiredCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range requiredCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta) + + subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects) + roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef) + + if subjectsAreSame && roleRefIsSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Subjects = requiredCopy.Subjects + existingCopy.RoleRef = requiredCopy.RoleRef + + if klog.V(4) { + klog.Infof("RoleBinding %q changes: %v", requiredCopy.Namespace+"/"+requiredCopy.Name, JSONPatch(existing, existingCopy)) + } + + actual, err := client.RoleBindings(requiredCopy.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, requiredCopy, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go new file mode 100644 index 0000000000..28aaa8d83a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go @@ -0,0 +1,50 @@ +package resourceapply + +import ( + "k8s.io/klog" + + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storageclientv1 "k8s.io/client-go/kubernetes/typed/storage/v1" + + "github.com/openshift/library-go/pkg/operator/events" + 
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyStorageClass merges objectmeta, tries to write everything else +func ApplyStorageClass(client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool, + error) { + existing, err := client.StorageClasses().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.StorageClasses().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy, required) + if contentSame && !*modified { + return existingCopy, false, nil + } + + objectMeta := existingCopy.ObjectMeta.DeepCopy() + existingCopy = required.DeepCopy() + existingCopy.ObjectMeta = *objectMeta + + if klog.V(4) { + klog.Infof("StorageClass %q changes: %v", required.Name, JSONPatch(existing, existingCopy)) + } + + // TODO if provisioner, parameters, reclaimpolicy, or volumebindingmode are different, update will fail so delete and recreate + actual, err := client.StorageClasses().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go new file mode 100644 index 0000000000..50458f784d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go @@ -0,0 +1,16 @@ +package resourcegraph + +type ResourceCoordinates struct { + Group string + Resource string + Namespace string + Name string +} + +func (c ResourceCoordinates) String() string { + resource := c.Resource + if len(c.Group) > 0 { + resource = resource + "." 
+ c.Group + } + return resource + "/" + c.Name + "[" + c.Namespace + "]" +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go new file mode 100644 index 0000000000..a7402b8169 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go @@ -0,0 +1,62 @@ +package resourcegraph + +import ( + "fmt" + + "github.com/gonum/graph" +) + +func NewResources() Resources { + return &resourcesImpl{} +} + +func NewResource(coordinates ResourceCoordinates) Resource { + return &simpleSource{coordinates: coordinates} +} + +func NewConfigMap(namespace, name string) Resource { + return NewResource(NewCoordinates("", "configmaps", namespace, name)) +} + +func NewSecret(namespace, name string) Resource { + return NewResource(NewCoordinates("", "secrets", namespace, name)) +} + +func NewOperator(name string) Resource { + return NewResource(NewCoordinates("config.openshift.io", "clusteroperators", "", name)) +} + +func NewConfig(resource string) Resource { + return NewResource(NewCoordinates("config.openshift.io", resource, "", "cluster")) +} + +type Resource interface { + Add(resources Resources) Resource + From(Resource) Resource + Note(note string) Resource + + fmt.Stringer + GetNote() string + Coordinates() ResourceCoordinates + Sources() []Resource + Dump(indentDepth int) []string + DumpSources(indentDepth int) []string +} + +type Resources interface { + Add(resource Resource) + Dump() []string + AllResources() []Resource + Resource(coordinates ResourceCoordinates) Resource + Roots() []Resource + NewGraph() graph.Directed +} + +func NewCoordinates(group, resource, namespace, name string) ResourceCoordinates { + return ResourceCoordinates{ + Group: group, + Resource: resource, + Namespace: namespace, + Name: name, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go new file mode 100644 index 0000000000..e3668e76c5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go @@ -0,0 +1,73 @@ +package resourcegraph + +import ( + "fmt" +) + +type simpleSource struct { + coordinates ResourceCoordinates + note string + nested []Resource + sources []Resource +} + +func (r *simpleSource) Coordinates() ResourceCoordinates { + return r.coordinates +} + +func (s *simpleSource) Add(resources Resources) Resource { + resources.Add(s) + return s +} + +func (s *simpleSource) From(source Resource) Resource { + s.sources = append(s.sources, source) + return s +} + +func (s *simpleSource) Note(note string) Resource { + s.note = note + return s +} + +func (s *simpleSource) String() string { + return fmt.Sprintf("%v%s", s.coordinates, s.note) +} + +func (s *simpleSource) GetNote() string { + return s.note +} + +func (s *simpleSource) Sources() []Resource { + return s.sources +} + +func (r *simpleSource) Dump(indentDepth int) []string { + lines := []string{} + lines = append(lines, indent(indentDepth, r.String())) + + for _, nested := range r.nested { + lines = append(lines, nested.Dump(indentDepth+1)...) 
+ } + + return lines +} + +func (r *simpleSource) DumpSources(indentDepth int) []string { + lines := []string{} + lines = append(lines, indent(indentDepth, r.String())) + + for _, source := range r.sources { + lines = append(lines, source.DumpSources(indentDepth+1)...) + } + + return lines +} + +func indent(depth int, in string) string { + indent := "" + for i := 0; i < depth; i++ { + indent = indent + " " + } + return indent + in +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go new file mode 100644 index 0000000000..482ea9b8d5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go @@ -0,0 +1,126 @@ +package resourcegraph + +import ( + "fmt" + "strings" + + "github.com/gonum/graph" + "github.com/gonum/graph/encoding/dot" + "github.com/gonum/graph/simple" +) + +type resourcesImpl struct { + resources []Resource +} + +func (r *resourcesImpl) Add(resource Resource) { + r.resources = append(r.resources, resource) +} + +func (r *resourcesImpl) Dump() []string { + lines := []string{} + for _, root := range r.Roots() { + lines = append(lines, root.Dump(0)...) + } + return lines +} + +func (r *resourcesImpl) AllResources() []Resource { + ret := []Resource{} + for _, v := range r.resources { + ret = append(ret, v) + } + return ret +} + +func (r *resourcesImpl) Resource(coordinates ResourceCoordinates) Resource { + for _, v := range r.resources { + if v.Coordinates() == coordinates { + return v + } + } + return nil +} + +func (r *resourcesImpl) Roots() []Resource { + ret := []Resource{} + for _, resource := range r.AllResources() { + if len(resource.Sources()) > 0 { + continue + } + ret = append(ret, resource) + } + return ret +} + +type resourceGraphNode struct { + simple.Node + Resource Resource +} + +// DOTAttributes implements an attribute getter for the DOT encoding +func (n resourceGraphNode) DOTAttributes() []dot.Attribute { + color := "white" + switch { + case n.Resource.Coordinates().Resource == "clusteroperators": + color = `"#c8fbcd"` // green + case n.Resource.Coordinates().Resource == "configmaps": + color = `"#bdebfd"` // blue + case n.Resource.Coordinates().Resource == "secrets": + color = `"#fffdb8"` // yellow + case n.Resource.Coordinates().Resource == "pods": + color = `"#ffbfb8"` // red + case n.Resource.Coordinates().Group == "config.openshift.io": + color = `"#c7bfff"` // purple + } + resource := n.Resource.Coordinates().Resource + if len(n.Resource.Coordinates().Group) > 0 { + resource = resource + "." 
+ n.Resource.Coordinates().Group + } + label := fmt.Sprintf("%s\n%s\n%s\n%s", resource, n.Resource.Coordinates().Name, n.Resource.Coordinates().Namespace, n.Resource.GetNote()) + return []dot.Attribute{ + {Key: "label", Value: fmt.Sprintf("%q", label)}, + {Key: "style", Value: "filled"}, + {Key: "fillcolor", Value: color}, + } +} + +func (r *resourcesImpl) NewGraph() graph.Directed { + g := simple.NewDirectedGraph(1.0, 0.0) + + coordinatesToNode := map[ResourceCoordinates]graph.Node{} + idToCoordinates := map[int]ResourceCoordinates{} + + // make all nodes + allResources := r.AllResources() + for i := range allResources { + resource := allResources[i] + id := g.NewNodeID() + node := resourceGraphNode{Node: simple.Node(id), Resource: resource} + + coordinatesToNode[resource.Coordinates()] = node + idToCoordinates[id] = resource.Coordinates() + g.AddNode(node) + } + + // make all edges + for i := range allResources { + resource := allResources[i] + + for _, source := range resource.Sources() { + from := coordinatesToNode[source.Coordinates()] + to := coordinatesToNode[resource.Coordinates()] + g.SetEdge(simple.Edge{F: from, T: to}) + } + } + + return g +} + +// Quote takes an arbitrary DOT ID and escapes any quotes that is contains. +// The resulting string is quoted again to guarantee that it is a valid ID. +// DOT graph IDs can be any double-quoted string +// See http://www.graphviz.org/doc/info/lang.html +func Quote(id string) string { + return fmt.Sprintf(`"%s"`, strings.Replace(id, `"`, `\"`, -1)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go new file mode 100644 index 0000000000..aa8b3ec27d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go @@ -0,0 +1,171 @@ +package resourcehash + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "hash/fnv" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/listers/core/v1" +) + +// GetConfigMapHash returns a hash of the configmap data +func GetConfigMapHash(obj *corev1.ConfigMap) (string, error) { + hasher := fnv.New32() + if err := json.NewEncoder(hasher).Encode(obj.Data); err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil +} + +// GetSecretHash returns a hash of the secret data +func GetSecretHash(obj *corev1.Secret) (string, error) { + hasher := fnv.New32() + if err := json.NewEncoder(hasher).Encode(obj.Data); err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil +} + +// MultipleObjectHashStringMap returns a map of key/hash pairs suitable for merging into a configmap +func MultipleObjectHashStringMap(objs ...runtime.Object) (map[string]string, error) { + ret := map[string]string{} + + for _, obj := range objs { + switch t := obj.(type) { + case *corev1.ConfigMap: + hash, err := GetConfigMapHash(t) + if err != nil { + return nil, err + } + // this string coercion is lossy, but it should be fairly controlled and must be an allowed name + ret[mapKeyFor("configmap", t.Namespace, t.Name)] = hash + + case *corev1.Secret: + hash, err := GetSecretHash(t) + if err != nil { + return nil, err + } + // this string coercion is lossy, but it should 
be fairly controlled and must be an allowed name + ret[mapKeyFor("secret", t.Namespace, t.Name)] = hash + + default: + return nil, fmt.Errorf("%T is not handled", t) + } + } + + return ret, nil +} + +func mapKeyFor(resource, namespace, name string) string { + return fmt.Sprintf("%s.%s.%s", namespace, name, resource) +} + +// ObjectReference can be used to reference a particular resource. Not all group resources are respected by all methods. +type ObjectReference struct { + Resource schema.GroupResource + Namespace string + Name string +} + +// MultipleObjectHashStringMapForObjectReferences returns a map of key/hash pairs suitable for merging into a configmap +func MultipleObjectHashStringMapForObjectReferences(client kubernetes.Interface, objRefs ...*ObjectReference) (map[string]string, error) { + objs := []runtime.Object{} + + for _, objRef := range objRefs { + switch objRef.Resource { + case schema.GroupResource{Resource: "configmap"}, schema.GroupResource{Resource: "configmaps"}: + obj, err := client.CoreV1().ConfigMaps(objRef.Namespace).Get(objRef.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + case schema.GroupResource{Resource: "secret"}, schema.GroupResource{Resource: "secrets"}: + obj, err := client.CoreV1().Secrets(objRef.Namespace).Get(objRef.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + default: + return nil, fmt.Errorf("%v is not handled", objRef.Resource) + } + } + + return MultipleObjectHashStringMap(objs...) +} + +// MultipleObjectHashStringMapForObjectReferenceFromLister is MultipleObjectHashStringMapForObjectReferences using a lister for performance +func MultipleObjectHashStringMapForObjectReferenceFromLister(configmapLister v1.ConfigMapLister, secretLister v1.SecretLister, objRefs ...*ObjectReference) (map[string]string, error) { + objs := []runtime.Object{} + + for _, objRef := range objRefs { + switch objRef.Resource { + case schema.GroupResource{Resource: "configmap"}, schema.GroupResource{Resource: "configmaps"}: + obj, err := configmapLister.ConfigMaps(objRef.Namespace).Get(objRef.Name) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + case schema.GroupResource{Resource: "secret"}, schema.GroupResource{Resource: "secrets"}: + obj, err := secretLister.Secrets(objRef.Namespace).Get(objRef.Name) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + default: + return nil, fmt.Errorf("%v is not handled", objRef.Resource) + } + } + + return MultipleObjectHashStringMap(objs...) 
+} + +func NewObjectRef() *ObjectReference { + return &ObjectReference{} +} + +func (r *ObjectReference) ForConfigMap() *ObjectReference { + r.Resource = schema.GroupResource{Resource: "configmaps"} + return r +} + +func (r *ObjectReference) ForSecret() *ObjectReference { + r.Resource = schema.GroupResource{Resource: "secrets"} + return r +} + +func (r *ObjectReference) Named(name string) *ObjectReference { + r.Name = name + return r +} + +func (r *ObjectReference) InNamespace(namespace string) *ObjectReference { + r.Namespace = namespace + return r +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go new file mode 100644 index 0000000000..32e4043f62 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go @@ -0,0 +1,18 @@ +package resourcemerge + +import ( + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureCustomResourceDefinition ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureCustomResourceDefinition(modified *bool, existing *apiextv1beta1.CustomResourceDefinition, required apiextv1beta1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go new file mode 100644 index 0000000000..1731382e68 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go @@ -0,0 +1,80 @@ +package resourcemerge + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorsv1 "github.com/openshift/api/operator/v1" +) + +func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus { + for i := range generations { + curr := &generations[i] + if curr.Namespace == namespace && + curr.Name == name && + curr.Group == resource.Group && + curr.Resource == resource.Resource { + + return curr + } + } + + return nil +} + +func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) { + if generations == nil { + generations = &[]operatorsv1.GenerationStatus{} + } + + existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name) + if existingGeneration == nil { + *generations = append(*generations, newGeneration) + return + } + + existingGeneration.LastGeneration = newGeneration.LastGeneration + existingGeneration.Hash = newGeneration.Hash +} + +func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDeploymentGeneration(generations 
*[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "deployments", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} + +func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "daemonsets", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go new file mode 100644 index 0000000000..dc3a9db3d6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go @@ -0,0 +1,134 @@ +package resourcemerge + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +// MergeConfigMap takes a configmap, the target key, special overlay funcs a list of config configs to overlay on top of each other +// It returns the resultant configmap and a bool indicating if any changes were made to the configmap +func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { + configBytes, err := MergeProcessConfig(specialCases, configYAMLs...) 
+ if err != nil { + return nil, false, err + } + + if reflect.DeepEqual(configMap.Data[configKey], configBytes) { + return configMap, false, nil + } + + ret := configMap.DeepCopy() + ret.Data[configKey] = string(configBytes) + + return ret, true, nil +} + +// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous +func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + currentConfigYAML := configYAMLs[0] + + for _, currConfigYAML := range configYAMLs[1:] { + prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML) + if err != nil { + klog.Warning(err) + // maybe it's just json + prevConfigJSON = currentConfigYAML + } + prevConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil { + return nil, err + } + + if len(currConfigYAML) > 0 { + currConfigJSON, err := kyaml.ToJSON(currConfigYAML) + if err != nil { + klog.Warning(err) + // maybe it's just json + currConfigJSON = currConfigYAML + } + currConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil { + return nil, err + } + + // protected against mismatched typemeta + prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion") + prevKind, _, _ := unstructured.NestedString(prevConfig, "kind") + currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion") + currKind, _, _ := unstructured.NestedString(currConfig, "kind") + currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0 + gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind + if currGVKSet && gvkMismatched { + return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind) + } + + if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil { + return nil, err + } + } + + currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig}) + if err != nil { + return nil, err + } + } + + return currentConfigYAML, nil +} + +type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) + +// mergeConfig overwrites entries in curr by additional. It modifies curr. +func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error { + for additionalKey, additionalVal := range additional { + fullKey := currentPath + "." 
+ additionalKey + specialCase, ok := specialCases[fullKey] + if ok { + var err error + curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath) + if err != nil { + return err + } + continue + } + + currVal, ok := curr[additionalKey] + if !ok { + curr[additionalKey] = additionalVal + continue + } + + // only some scalars are accepted + switch castVal := additionalVal.(type) { + case map[string]interface{}: + currValAsMap, ok := currVal.(map[string]interface{}) + if !ok { + currValAsMap = map[string]interface{}{} + curr[additionalKey] = currValAsMap + } + + err := mergeConfig(currValAsMap, castVal, fullKey, specialCases) + if err != nil { + return err + } + continue + + default: + if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil { + return err + } + } + + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go new file mode 100644 index 0000000000..9d03da6e2d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -0,0 +1,153 @@ +package resourcemerge + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here. +// TODO finalizer support maybe? +func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { + SetStringIfSet(modified, &existing.Namespace, required.Namespace) + SetStringIfSet(modified, &existing.Name, required.Name) + MergeMap(modified, &existing.Labels, required.Labels) + MergeMap(modified, &existing.Annotations, required.Annotations) +} + +func stringPtr(val string) *string { + return &val +} + +func SetString(modified *bool, existing *string, required string) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetStringIfSet(modified *bool, existing *string, required string) { + if len(required) == 0 { + return + } + if required != *existing { + *existing = required + *modified = true + } +} + +func setStringPtr(modified *bool, existing **string, required *string) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetString(modified, *existing, *required) +} + +func SetStringSlice(modified *bool, existing *[]string, required []string) { + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) { + if required == nil { + return + } + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func BoolPtr(val bool) *bool { + return &val +} + +func SetBool(modified *bool, existing *bool, required bool) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setBoolPtr(modified *bool, existing **bool, required *bool) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetBool(modified, *existing, *required) +} + +func int64Ptr(val int64) *int64 { + return &val +} + +func SetInt32(modified *bool, existing *int32, required int32) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetInt32IfSet(modified *bool, existing *int32, required int32) { + if 
required == 0 { + return + } + + SetInt32(modified, existing, required) +} + +func SetInt64(modified *bool, existing *int64, required int64) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setInt64Ptr(modified *bool, existing **int64, required *int64) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetInt64(modified, *existing, *required) +} + +func MergeMap(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + for k, v := range required { + if existingV, ok := (*existing)[k]; !ok || v != existingV { + *modified = true + (*existing)[k] = v + } + } +} + +func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} + +func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) { + if required == nil { + return + } + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go new file mode 100644 index 0000000000..81a11c871c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + apiExtensionsScheme = runtime.NewScheme() + apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme) +) + +func init() { + if err := apiextensionsv1beta1.AddToScheme(apiExtensionsScheme); err != nil { + panic(err) + } +} + +func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextensionsv1beta1.CustomResourceDefinition { + requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1beta1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiextensionsv1beta1.CustomResourceDefinition) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go new file mode 100644 index 0000000000..8490017e1c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go @@ -0,0 +1,34 @@ +package resourceread + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + appsScheme = runtime.NewScheme() + appsCodecs = serializer.NewCodecFactory(appsScheme) +) + +func init() { + if err := appsv1.AddToScheme(appsScheme); err != nil { + panic(err) + } +} + +func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.Deployment) +} + +func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet { + requiredObj, err := 
runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.DaemonSet) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go new file mode 100644 index 0000000000..ac2b477585 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go @@ -0,0 +1,70 @@ +package resourceread + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + coreScheme = runtime.NewScheme() + coreCodecs = serializer.NewCodecFactory(coreScheme) +) + +func init() { + if err := corev1.AddToScheme(coreScheme); err != nil { + panic(err) + } +} + +func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ConfigMap) +} + +func ReadSecretV1OrDie(objBytes []byte) *corev1.Secret { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Secret) +} + +func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Namespace) +} + +func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ServiceAccount) +} + +func ReadServiceV1OrDie(objBytes []byte) *corev1.Service { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Service) +} + +func ReadPodV1OrDie(objBytes []byte) *corev1.Pod { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Pod) +} + +func WritePodV1OrDie(obj *corev1.Pod) string { + return runtime.EncodeOrDie(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), obj) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go new file mode 100644 index 0000000000..bf14899d88 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go @@ -0,0 +1,50 @@ +package resourceread + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + rbacScheme = runtime.NewScheme() + rbacCodecs = serializer.NewCodecFactory(rbacScheme) +) + +func init() { + if err := rbacv1.AddToScheme(rbacScheme); err != nil { + panic(err) + } +} + +func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRoleBinding) +} + +func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole { + requiredObj, err := 
runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRole) +} + +func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.RoleBinding) +} + +func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.Role) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go new file mode 100644 index 0000000000..3a488870eb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + storageScheme = runtime.NewScheme() + storageCodecs = serializer.NewCodecFactory(storageScheme) +) + +func init() { + if err := storagev1.AddToScheme(storageScheme); err != nil { + panic(err) + } +} + +func ReadStorageClassV1OrDie(objBytes []byte) *storagev1.StorageClass { + requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*storagev1.StorageClass) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go new file mode 100644 index 0000000000..bd24a5d85b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go @@ -0,0 +1,59 @@ +package retry + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" +) + +// ignoreConnectionErrors is a wrapper for condition function that will cause to retry on all errors like +// connection refused, EOF, no route to host, etc. but also all 50x API server errors. +// This wrapper will return immediately on HTTP 40x client errors and those will not be retried. +func ignoreConnectionErrors(lastError *error, fn ConditionWithContextFunc) ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + done, err := fn(ctx) + switch { + case done: + return true, err + case err == nil: + return true, nil + case IsHTTPClientError(err): + return false, err + default: + *lastError = err + return false, nil + } + } +} + +// RetryOnConnectionErrors will take context and condition function and retry the condition function until: +// 1) no error is returned +// 2) a client (4xx) HTTP error is returned +// 3) the context passed to the condition function is done +// 4) numbers of steps in the exponential backoff are met +// In case of 3) or 4) the error returned will be the last observed error from the condition function. 
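+//
+// A minimal usage sketch (ctx, client and the configmap coordinates below are assumptions,
+// not defined by this package):
+//
+//	err := RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) {
+//		_, getErr := client.CoreV1().ConfigMaps("openshift-config").Get("my-config", metav1.GetOptions{})
+//		// done on success; 4xx errors abort immediately, connection/5xx errors are retried
+//		return getErr == nil, getErr
+//	})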
+func RetryOnConnectionErrors(ctx context.Context, fn ConditionWithContextFunc) error { + var lastRetryErr error + err := ExponentialBackoffWithContext(ctx, retry.DefaultBackoff, ignoreConnectionErrors(&lastRetryErr, fn)) + switch err { + case wait.ErrWaitTimeout: + if lastRetryErr != nil { + return lastRetryErr + } + return err + default: + return err + } +} + +// IsHTTPClientError indicates whether the error passes is an 4xx API server error (client error). +func IsHTTPClientError(err error) bool { + switch t := err.(type) { + case errors.APIStatus: + return t.Status().Code >= 400 && t.Status().Code < 500 + default: + return false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go new file mode 100644 index 0000000000..49d7e600a3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go @@ -0,0 +1,36 @@ +package retry + +import ( + "context" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// TODO: This should be added to k8s.io/client-go/util/retry + +// ConditionWithContextFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. The context passed to condition function allow function body +// to return faster than context.Done(). +type ConditionWithContextFunc func(ctx context.Context) (done bool, err error) + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff and stop repeating +// when the context passed to this function is done. +// +// It checks the condition up to Steps times, increasing the wait by multiplying +// the previous duration by Factor. +// +// If Jitter is greater than zero, a random amount of each duration is added +// (between duration and duration*(1+jitter)). +// +// If the condition never returns true, ErrWaitTimeout is returned. All other +// errors terminate immediately. 
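+//
+// A minimal sketch of a typical call (the backoff values and the resourceIsReady helper
+// are illustrative assumptions):
+//
+//	backoff := wait.Backoff{Steps: 5, Duration: 100 * time.Millisecond, Factor: 2.0, Jitter: 0.1}
+//	err := ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
+//		// retried until it returns true, ctx is done, or Steps is exhausted
+//		return resourceIsReady(), nil
+//	})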
+func ExponentialBackoffWithContext(ctx context.Context, backoff wait.Backoff, condition ConditionWithContextFunc) error { + return wait.ExponentialBackoff(backoff, func() (bool, error) { + select { + case <-ctx.Done(): + return false, wait.ErrWaitTimeout + default: + return condition(ctx) + } + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go new file mode 100644 index 0000000000..f5a26338b7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go @@ -0,0 +1,67 @@ +package resourcesynccontroller + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + + "github.com/openshift/library-go/pkg/crypto" +) + +func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { + certificates := []*x509.Certificate{} + for _, input := range inputConfigMaps { + inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + + // configmaps must conform to this + inputContent := inputConfigMap.Data["ca-bundle.crt"] + if len(inputContent) == 0 { + continue + } + inputCerts, err := cert.ParseCertsPEM([]byte(inputContent)) + if err != nil { + return nil, fmt.Errorf("configmap/%s in %q is malformed: %v", input.Name, input.Namespace, err) + } + certificates = append(certificates, inputCerts...) + } + + certificates = crypto.FilterExpiredCerts(certificates...) + finalCertificates := []*x509.Certificate{} + // now check for duplicates. n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) + if err != nil { + return nil, err + } + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: destinationConfigMap.Namespace, Name: destinationConfigMap.Name}, + Data: map[string]string{ + "ca-bundle.crt": string(caBytes), + }, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go new file mode 100644 index 0000000000..344eddd830 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go @@ -0,0 +1,19 @@ +package resourcesynccontroller + +// ResourceLocation describes coordinates for a resource to be synced +type ResourceLocation struct { + Namespace string `json:"namespace"` + Name string `json:"name"` +} + +var emptyResourceLocation = ResourceLocation{} + +// ResourceSyncer allows changes to syncing rules by this controller +type ResourceSyncer interface { + // SyncConfigMap indicates that a configmap should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. 
+ SyncConfigMap(destination, source ResourceLocation) error + // SyncSecret indicates that a secret should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. + SyncSecret(destination, source ResourceLocation) error +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go new file mode 100644 index 0000000000..4d4cc4f127 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -0,0 +1,329 @@ +package resourcesynccontroller + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + operatorStatusResourceSyncControllerDegraded = "ResourceSyncControllerDegraded" + controllerWorkQueueKey = "key" +) + +// ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. +// It will also mirror deletions by deleting destinations. +type ResourceSyncController struct { + // syncRuleLock is used to ensure we avoid races on changes to syncing rules + syncRuleLock sync.RWMutex + // configMapSyncRules is a map from destination location to source location + configMapSyncRules map[ResourceLocation]ResourceLocation + // secretSyncRules is a map from destination location to source location + secretSyncRules map[ResourceLocation]ResourceLocation + + // knownNamespaces is the list of namespaces we are watching. + knownNamespaces sets.String + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces + operatorConfigClient v1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +var _ ResourceSyncer = &ResourceSyncController{} + +// NewResourceSyncController creates ResourceSyncController. 
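+//
+// A rough wiring sketch; operatorClient, kubeInformers, kubeClient, recorder and stopCh are
+// assumed to exist, and both namespaces must be among those covered by kubeInformers:
+//
+//	c := NewResourceSyncController(operatorClient, kubeInformers,
+//		kubeClient.CoreV1(), kubeClient.CoreV1(), recorder)
+//	_ = c.SyncConfigMap(
+//		ResourceLocation{Namespace: "openshift-kube-apiserver", Name: "etcd-serving-ca"},
+//		ResourceLocation{Namespace: "openshift-config", Name: "etcd-serving-ca"},
+//	)
+//	go c.Run(1, stopCh)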
+func NewResourceSyncController( + operatorConfigClient v1helpers.OperatorClient, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + secretsGetter corev1client.SecretsGetter, + configMapsGetter corev1client.ConfigMapsGetter, + eventRecorder events.Recorder, +) *ResourceSyncController { + c := &ResourceSyncController{ + operatorConfigClient: operatorConfigClient, + eventRecorder: eventRecorder.WithComponentSuffix("resource-sync-controller"), + + configMapSyncRules: map[ResourceLocation]ResourceLocation{}, + secretSyncRules: map[ResourceLocation]ResourceLocation{}, + kubeInformersForNamespaces: kubeInformersForNamespaces, + knownNamespaces: kubeInformersForNamespaces.Namespaces(), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ResourceSyncController"), + configMapGetter: configMapsGetter, + secretGetter: secretsGetter, + } + + for namespace := range kubeInformersForNamespaces.Namespaces() { + if len(namespace) == 0 { + continue + } + informers := kubeInformersForNamespaces.InformersFor(namespace) + informers.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + informers.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().Secrets().Informer().HasSynced) + } + + // we watch this just in case someone messes with our status + operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorConfigClient.Informer().HasSynced) + + return c +} + +func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.configMapSyncRules[destination] = source + + // make sure the new rule is picked up + c.queue.Add(controllerWorkQueueKey) + return nil +} + +func (c *ResourceSyncController) SyncSecret(destination, source ResourceLocation) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.secretSyncRules[destination] = source + + // make sure the new rule is picked up + c.queue.Add(controllerWorkQueueKey) + return nil +} + +func (c *ResourceSyncController) sync() error { + operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + c.syncRuleLock.RLock() + defer c.syncRuleLock.RUnlock() + + errors := []error{} + + for destination, source := range c.configMapSyncRules { + if source == emptyResourceLocation { + // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call. 
+ if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncConfigMap(c.configMapGetter, c.eventRecorder, source.Namespace, source.Name, destination.Namespace, destination.Name, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, err) + } + } + for destination, source := range c.secretSyncRules { + if source == emptyResourceLocation { + // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call. + if _, err := c.secretGetter.Secrets(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.secretGetter.Secrets(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncSecret(c.secretGetter, c.eventRecorder, source.Namespace, source.Name, destination.Namespace, destination.Name, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + cond := operatorv1.OperatorCondition{ + Type: operatorStatusResourceSyncControllerDegraded, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: v1helpers.NewMultiLineAggregate(errors).Error(), + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: operatorStatusResourceSyncControllerDegraded, + Status: operatorv1.ConditionFalse, + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil +} + +func (c *ResourceSyncController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting ResourceSyncController") + defer klog.Infof("Shutting down ResourceSyncController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *ResourceSyncController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ResourceSyncController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ResourceSyncController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} + +func NewDebugHandler(controller *ResourceSyncController) http.Handler { + return &debugHTTPHandler{controller: controller} +} + +type debugHTTPHandler struct { + controller *ResourceSyncController +} + +type ResourceSyncRule struct { + Source ResourceLocation `json:"source"` + Destination ResourceLocation `json:"destination"` +} + +type ResourceSyncRuleList []ResourceSyncRule + +func (l ResourceSyncRuleList) Len() int { return len(l) } +func (l ResourceSyncRuleList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l ResourceSyncRuleList) Less(i, j int) bool { + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) < 0 { + return true + } + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) > 0 { + return false + } + if strings.Compare(l[i].Source.Name, l[j].Source.Name) < 0 { + return true + } + return false +} + +type ControllerSyncRules struct { + Secrets ResourceSyncRuleList `json:"secrets"` + Configs ResourceSyncRuleList `json:"configs"` +} + +// ServeSyncRules provides a handler function to return the sync rules of the controller +func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + syncRules := ControllerSyncRules{ResourceSyncRuleList{}, ResourceSyncRuleList{}} + + h.controller.syncRuleLock.RLock() + defer h.controller.syncRuleLock.RUnlock() + syncRules.Secrets = append(syncRules.Secrets, resourceSyncRuleList(h.controller.secretSyncRules)...) + syncRules.Configs = append(syncRules.Configs, resourceSyncRuleList(h.controller.configMapSyncRules)...) 
+ + data, err := json.Marshal(syncRules) + if err != nil { + w.Write([]byte(err.Error())) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Write(data) + w.WriteHeader(http.StatusOK) +} + +func resourceSyncRuleList(syncRules map[ResourceLocation]ResourceLocation) ResourceSyncRuleList { + rules := make(ResourceSyncRuleList, 0, len(syncRules)) + for src, dest := range syncRules { + rule := ResourceSyncRule{ + Source: src, + Destination: dest, + } + rules = append(rules, rule) + } + sort.Sort(rules) + return rules +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go new file mode 100644 index 0000000000..00586222c8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go @@ -0,0 +1,119 @@ +package certsyncpod + +import ( + "io/ioutil" + "os" + "time" + + "github.com/spf13/cobra" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" +) + +type CertSyncControllerOptions struct { + KubeConfigFile string + Namespace string + DestinationDir string + + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + kubeClient kubernetes.Interface +} + +func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResource) *cobra.Command { + o := &CertSyncControllerOptions{ + configMaps: configmaps, + secrets: secrets, + } + + cmd := &cobra.Command{ + Use: "cert-syncer --kubeconfig=kubeconfigfile", + Run: func(cmd *cobra.Command, args []string) { + if err := o.Complete(); err != nil { + klog.Fatal(err) + } + if err := o.Run(); err != nil { + klog.Fatal(err) + } + }, + } + + cmd.Flags().StringVar(&o.DestinationDir, "destination-dir", o.DestinationDir, "Directory to write to") + cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "Namespace to read from") + cmd.Flags().StringVar(&o.KubeConfigFile, "kubeconfig", o.KubeConfigFile, "Location of the master configuration file to run from.") + + return cmd +} + +func (o *CertSyncControllerOptions) Run() error { + // When the kubeconfig content change, commit suicide to reload its content. 
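+	// In other words: the file observer below watches the kubeconfig file and exits the
+	// process when it changes, so a restart (e.g. by the kubelet) picks up the new contents.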
+ observer, err := fileobserver.NewObserver(500 * time.Millisecond) + if err != nil { + return err + } + + initialContent, _ := ioutil.ReadFile(o.KubeConfigFile) + observer.AddReactor(fileobserver.ExitOnChangeReactor, map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) + + stopCh := make(chan struct{}) + go observer.Run(stopCh) + + kubeInformers := informers.NewSharedInformerFactoryWithOptions(o.kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) + go kubeInformers.Start(stopCh) + + eventRecorder := events.NewKubeRecorder(o.kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", + &corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: os.Getenv("POD_NAMESPACE"), + Name: os.Getenv("POD_NAME"), + }) + + controller, err := NewCertSyncController( + o.DestinationDir, + o.Namespace, + o.configMaps, + o.secrets, + kubeInformers, + eventRecorder, + ) + if err != nil { + return err + } + go controller.Run(1, stopCh) + + <-stopCh + klog.Infof("Shutting down certificate syncer") + + return nil +} + +func (o *CertSyncControllerOptions) Complete() error { + kubeConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfigFile, nil) + if err != nil { + return err + } + + protoKubeConfig := rest.CopyConfig(kubeConfig) + protoKubeConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoKubeConfig.ContentType = "application/vnd.kubernetes.protobuf" + + // This kube client use protobuf, do not use it for CR + kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) + if err != nil { + return err + } + o.kubeClient = kubeClient + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go new file mode 100644 index 0000000000..397adb0f63 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go @@ -0,0 +1,203 @@ +package certsyncpod + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "time" + + "k8s.io/klog" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" +) + +type CertSyncController struct { + destinationDir string + namespace string + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + configMapLister v1.ConfigMapLister + secretLister v1.SecretLister + eventRecorder events.Recorder + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface + preRunCaches []cache.InformerSynced +} + +func NewCertSyncController(targetDir, targetNamespace string, configmaps, secrets []revision.RevisionResource, informers informers.SharedInformerFactory, eventRecorder events.Recorder) (*CertSyncController, error) { + c := &CertSyncController{ + destinationDir: targetDir, + namespace: targetNamespace, + configMaps: configmaps, + secrets: secrets, + eventRecorder: eventRecorder.WithComponentSuffix("cert-sync-controller"), + + configMapLister: informers.Core().V1().ConfigMaps().Lister(), 
+ secretLister: informers.Core().V1().Secrets().Lister(), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertSyncController"), + preRunCaches: []cache.InformerSynced{ + informers.Core().V1().ConfigMaps().Informer().HasSynced, + informers.Core().V1().Secrets().Informer().HasSynced, + }, + } + + informers.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + informers.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + return c, nil +} + +func getConfigMapDir(targetDir, configMapName string) string { + return filepath.Join(targetDir, "configmaps", configMapName) +} + +func getSecretDir(targetDir, secretName string) string { + return filepath.Join(targetDir, "secrets", secretName) +} + +func (c *CertSyncController) sync() error { + errors := []error{} + + for _, cm := range c.configMaps { + configMap, err := c.configMapLister.ConfigMaps(c.namespace).Get(cm.Name) + switch { + case apierrors.IsNotFound(err) && !cm.Optional: + errors = append(errors, err) + continue + case apierrors.IsNotFound(err) && cm.Optional: + // remove missing content + if err := os.RemoveAll(getConfigMapDir(c.destinationDir, cm.Name)); err != nil { + errors = append(errors, err) + } + continue + case err != nil: + errors = append(errors, err) + continue + } + + contentDir := getConfigMapDir(c.destinationDir, cm.Name) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + errors = append(errors, err) + continue + } + for filename, content := range configMap.Data { + fullFilename := filepath.Join(contentDir, filename) + // if the existing is the same, do nothing + if existingContent, err := ioutil.ReadFile(fullFilename); err == nil && reflect.DeepEqual(existingContent, []byte(content)) { + continue + } + + klog.Infof("Writing configmap manifest %q ...", fullFilename) + if err := ioutil.WriteFile(fullFilename, []byte(content), 0644); err != nil { + errors = append(errors, err) + continue + } + } + } + + for _, s := range c.secrets { + secret, err := c.secretLister.Secrets(c.namespace).Get(s.Name) + switch { + case apierrors.IsNotFound(err) && !s.Optional: + errors = append(errors, err) + continue + case apierrors.IsNotFound(err) && s.Optional: + // remove missing content + if err := os.RemoveAll(getSecretDir(c.destinationDir, s.Name)); err != nil { + errors = append(errors, err) + } + continue + case err != nil: + errors = append(errors, err) + continue + } + + contentDir := getSecretDir(c.destinationDir, s.Name) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + errors = append(errors, err) + continue + } + for filename, content := range secret.Data { + // TODO fix permissions + fullFilename := filepath.Join(contentDir, filename) + // if the existing is the same, do nothing + if existingContent, err := ioutil.ReadFile(fullFilename); err == nil && reflect.DeepEqual(existingContent, content) { + continue + } + + klog.Infof("Writing secret manifest %q ...", fullFilename) + if err := ioutil.WriteFile(fullFilename, content, 0644); err != nil { + errors = append(errors, err) + continue + } + } + } + + return utilerrors.NewAggregate(errors) +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. 
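+// More precisely: it starts the CertSyncController's single sync worker (the workers
+// argument is ignored) and blocks until stopCh is closed.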
+func (c *CertSyncController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting CertSyncer") + defer klog.Infof("Shutting down CertSyncer") + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *CertSyncController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *CertSyncController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +const workQueueKey = "key" + +// eventHandler queues the operator to check spec and status +func (c *CertSyncController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go new file mode 100644 index 0000000000..9c55363dfe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go @@ -0,0 +1,181 @@ +package backingresource + +import ( + "fmt" + "path/filepath" + "time" + + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "k8s.io/klog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corelisterv1 "k8s.io/client-go/listers/core/v1" + rbaclisterv1 "k8s.io/client-go/listers/rbac/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata" +) + +const ( + operatorStatusBackingResourceControllerDegraded = "BackingResourceControllerDegraded" + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/backingresource" +) + +// BackingResourceController is a controller that watches the operator config and updates +// service accounts and RBAC rules in the target namespace according to the bindata manifests +// (templated with the config) if they differ. +type BackingResourceController struct { + targetNamespace string + + operatorClient v1helpers.OperatorClient + saLister corelisterv1.ServiceAccountLister + clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister + kubeClient kubernetes.Interface + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewBackingResourceController creates a new backing resource controller. 
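+//
+// A minimal construction sketch (the target namespace, operatorClient, informers, kubeClient,
+// recorder and stopCh are assumed to be provided by the caller):
+//
+//	c := NewBackingResourceController("openshift-kube-apiserver",
+//		operatorClient, kubeInformersForTargetNamespace, kubeClient, recorder)
+//	go c.Run(1, stopCh)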
+func NewBackingResourceController( + targetNamespace string, + operatorClient v1helpers.OperatorClient, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + eventRecorder events.Recorder, +) *BackingResourceController { + c := &BackingResourceController{ + targetNamespace: targetNamespace, + operatorClient: operatorClient, + + saLister: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Lister(), + clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), + kubeClient: kubeClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "BackingResourceController"), + eventRecorder: eventRecorder.WithComponentSuffix("backing-resource-controller"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced) + + return c +} + +func (c BackingResourceController) mustTemplateAsset(name string) ([]byte, error) { + config := struct { + TargetNamespace string + }{ + TargetNamespace: c.targetNamespace, + } + return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil +} + +func (c BackingResourceController) sync() error { + operatorSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + directResourceResults := resourceapply.ApplyDirectly(c.kubeClient, c.eventRecorder, c.mustTemplateAsset, + "manifests/installer-sa.yaml", + "manifests/installer-cluster-rolebinding.yaml", + ) + + errs := []error{} + for _, currResult := range directResourceResults { + if currResult.Error != nil { + errs = append(errs, fmt.Errorf("%q (%T): %v", currResult.File, currResult.Type, currResult.Error)) + } + } + err = v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: operatorStatusBackingResourceControllerDegraded, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *BackingResourceController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting BackingResourceController") + defer klog.Infof("Shutting down BackingResourceController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *BackingResourceController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *BackingResourceController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *BackingResourceController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go new file mode 100644 index 0000000000..7aad2ff00e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go @@ -0,0 +1,258 @@ +// Code generated by go-bindata. +// sources: +// pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml +// pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml +// DO NOT EDIT! + +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:openshift:operator:{{ .TargetNamespace }}-installer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + namespace: {{ .TargetNamespace }} + name: installer-sa +`) + +func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml, nil +} + +func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml = []byte(`apiVersion: v1 +kind: 
ServiceAccount +metadata: + namespace: {{ .TargetNamespace }} + name: installer-sa +`) + +func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml, nil +} + +func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml": pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml, + "pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml": pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "backingresource": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "installer-cluster-rolebinding.yaml": {pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml, map[string]*bintree{}}, + "installer-sa.yaml": {pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml new file mode 100644 index 0000000000..ed055ada4b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:openshift:operator:{{ .TargetNamespace }}-installer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + namespace: {{ .TargetNamespace }} + name: installer-sa diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml new file mode 100644 index 0000000000..d389483b24 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{ .TargetNamespace }} + name: installer-sa diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go new file mode 100644 index 0000000000..eb39499b49 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go @@ -0,0 +1,263 @@ +// Code generated by go-bindata. +// sources: +// pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml +// DO NOT EDIT! 
+ +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml = []byte(`apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: installer +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: installer + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + resources: + requests: + memory: 100M + limits: + memory: 100M + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir`) + +func pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml, nil +} + +func pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml": pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "installer": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "installer-pod.yaml": {pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, 
strings.Split(cannonicalName, "/")...)...)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go
new file mode 100644
index 0000000000..0e7dc4242b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go
@@ -0,0 +1,843 @@
+package installer
+
+import (
+	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"k8s.io/klog"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/informers"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/loglevel"
+	"github.com/openshift/library-go/pkg/operator/management"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
+	"github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata"
+	"github.com/openshift/library-go/pkg/operator/staticpod/controller/revision"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const (
+	operatorStatusInstallerControllerDegraded = "InstallerControllerDegraded"
+	nodeInstallerDegraded                     = "NodeInstallerDegraded"
+	installerControllerWorkQueueKey           = "key"
+	manifestDir                               = "pkg/operator/staticpod/controller/installer"
+	manifestInstallerPodPath                  = "manifests/installer-pod.yaml"
+
+	hostResourceDirDir = "/etc/kubernetes/static-pod-resources"
+	hostPodManifestDir = "/etc/kubernetes/manifests"
+
+	revisionLabel       = "revision"
+	statusConfigMapName = "revision-status"
+)
+
+// InstallerController is a controller that watches the currentRevision and targetRevision fields for each node and spawns
+// installer pods to update the static pods on the master nodes.
+type InstallerController struct {
+	targetNamespace, staticPodName string
+	// configMaps is the list of configmaps that are directly copied. A different actor/controller modifies these.
+	// the first element should be the configmap that contains the static pod manifest
+	configMaps []revision.RevisionResource
+	// secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these.
+	secrets []revision.RevisionResource
+	// command is the command to use for the installer pod
+	command []string
+
+	// these are copied separately at the beginning to a fixed location
+	certConfigMaps []revision.RevisionResource
+	certSecrets    []revision.RevisionResource
+	certDir        string
+
+	operatorClient v1helpers.StaticPodOperatorClient
+
+	configMapsGetter corev1client.ConfigMapsGetter
+	secretsGetter    corev1client.SecretsGetter
+	podsGetter       corev1client.PodsGetter
+
+	cachesToSync  []cache.InformerSynced
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+
+	// installerPodImageFn returns the image name for the installer pod
+	installerPodImageFn func() string
+	// ownerRefsFn sets the ownerrefs on the installer pod
+	ownerRefsFn func(revision int32) ([]metav1.OwnerReference, error)
+
+	installerPodMutationFns []InstallerPodMutationFunc
+}
+
+// InstallerPodMutationFunc is a function that has a chance at changing the installer pod before it is created
+type InstallerPodMutationFunc func(pod *corev1.Pod, nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error
+
+func (o *InstallerController) WithInstallerPodMutationFn(installerPodMutationFn InstallerPodMutationFunc) *InstallerController {
+	o.installerPodMutationFns = append(o.installerPodMutationFns, installerPodMutationFn)
+	return o
+}
+
+func (o *InstallerController) WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) *InstallerController {
+	o.certDir = certDir
+	o.certConfigMaps = certConfigMaps
+	o.certSecrets = certSecrets
+	return o
+}
+
+// staticPodState is the status of a static pod that has been installed to a node.
+type staticPodState int
+
+const (
+	// staticPodStatePending means that the installed static pod is not up yet.
+	staticPodStatePending = staticPodState(iota)
+	// staticPodStateReady means that the installed static pod is ready.
+	staticPodStateReady
+	// staticPodStateFailed means that the static pod installation of a node has failed.
+	staticPodStateFailed
+)
+
+// NewInstallerController creates a new installer controller.
+func NewInstallerController( + targetNamespace, staticPodName string, + configMaps []revision.RevisionResource, + secrets []revision.RevisionResource, + command []string, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + operatorClient v1helpers.StaticPodOperatorClient, + configMapsGetter corev1client.ConfigMapsGetter, + secretsGetter corev1client.SecretsGetter, + podsGetter corev1client.PodsGetter, + eventRecorder events.Recorder, +) *InstallerController { + c := &InstallerController{ + targetNamespace: targetNamespace, + staticPodName: staticPodName, + configMaps: configMaps, + secrets: secrets, + command: command, + + operatorClient: operatorClient, + configMapsGetter: configMapsGetter, + secretsGetter: secretsGetter, + podsGetter: podsGetter, + eventRecorder: eventRecorder.WithComponentSuffix("installer-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerController"), + + installerPodImageFn: getInstallerPodImageFromEnv, + } + + c.ownerRefsFn = c.setOwnerRefs + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + + return c +} + +func (c *InstallerController) getStaticPodState(nodeName string) (state staticPodState, revision string, errors []string, err error) { + pod, err := c.podsGetter.Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, nodeName), metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return staticPodStatePending, "", nil, nil + } + return staticPodStatePending, "", nil, err + } + switch pod.Status.Phase { + case corev1.PodRunning, corev1.PodSucceeded: + for _, c := range pod.Status.Conditions { + if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { + return staticPodStateReady, pod.Labels[revisionLabel], nil, nil + } + } + case corev1.PodFailed: + return staticPodStateFailed, pod.Labels[revisionLabel], []string{pod.Status.Message}, nil + } + + return staticPodStatePending, "", nil, nil +} + +// nodeToStartRevisionWith returns a node index i and guarantees for every node < i that it is +// - not updating +// - ready +// - at the revision claimed in CurrentRevision. +func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state staticPodState, revision string, errors []string, err error), nodes []operatorv1.NodeStatus) (int, error) { + if len(nodes) == 0 { + return 0, fmt.Errorf("nodes array cannot be empty") + } + + // find upgrading node as this will be the first to start new revision (to minimize number of down nodes) + for i := range nodes { + if nodes[i].TargetRevision != 0 { + return i, nil + } + } + + // otherwise try to find a node that is not ready. Take the oldest one. 
+ oldestNotReadyRevisionNode := -1 + oldestNotReadyRevision := math.MaxInt32 + for i := range nodes { + currNodeState := &nodes[i] + state, revision, _, err := getStaticPodState(currNodeState.NodeName) + if err != nil && apierrors.IsNotFound(err) { + return i, nil + } + if err != nil { + return 0, err + } + revisionNum, err := strconv.Atoi(revision) + if err != nil { + return i, nil + } + if state != staticPodStateReady && revisionNum < oldestNotReadyRevision { + oldestNotReadyRevisionNode = i + oldestNotReadyRevision = revisionNum + } + } + if oldestNotReadyRevisionNode >= 0 { + return oldestNotReadyRevisionNode, nil + } + + // find a node that is has the wrong revision. Take the oldest one. + oldestPodRevisionNode := -1 + oldestPodRevision := math.MaxInt32 + for i := range nodes { + currNodeState := &nodes[i] + _, revision, _, err := getStaticPodState(currNodeState.NodeName) + if err != nil { + return 0, err + } + revisionNum, err := strconv.Atoi(revision) + if err != nil { + return i, nil + } + if revisionNum != int(currNodeState.CurrentRevision) && revisionNum < oldestPodRevision { + oldestPodRevisionNode = i + oldestPodRevision = revisionNum + } + } + if oldestPodRevisionNode >= 0 { + return oldestPodRevisionNode, nil + } + + // last but not least, choose the one with the older current revision. This will imply that failed installer pods will be retried. + oldestCurrentRevisionNode := -1 + oldestCurrentRevision := int32(math.MaxInt32) + for i := range nodes { + currNodeState := &nodes[i] + if currNodeState.CurrentRevision < oldestCurrentRevision { + oldestCurrentRevisionNode = i + oldestCurrentRevision = currNodeState.CurrentRevision + } + } + if oldestCurrentRevisionNode >= 0 { + return oldestCurrentRevisionNode, nil + } + + return 0, nil +} + +// manageInstallationPods takes care of creating content for the static pods to install. +// returns whether or not requeue and if an error happened when updating status. Normally it updates status itself. +func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.StaticPodOperatorSpec, originalOperatorStatus *operatorv1.StaticPodOperatorStatus, resourceVersion string) (bool, error) { + operatorStatus := originalOperatorStatus.DeepCopy() + + if len(operatorStatus.NodeStatuses) == 0 { + return false, nil + } + + // stop on first deployment failure of the latest revision (excluding OOM, that never sets LatestAvailableRevision). 
+ for _, s := range operatorStatus.NodeStatuses { + if s.LastFailedRevision == operatorStatus.LatestAvailableRevision { + return false, nil + } + } + + // start with node which is in worst state (instead of terminating healthy pods first) + startNode, err := nodeToStartRevisionWith(c.getStaticPodState, operatorStatus.NodeStatuses) + if err != nil { + return true, err + } + + for l := 0; l < len(operatorStatus.NodeStatuses); l++ { + i := (startNode + l) % len(operatorStatus.NodeStatuses) + + var currNodeState *operatorv1.NodeStatus + var prevNodeState *operatorv1.NodeStatus + currNodeState = &operatorStatus.NodeStatuses[i] + if l > 0 { + prev := (startNode + l - 1) % len(operatorStatus.NodeStatuses) + prevNodeState = &operatorStatus.NodeStatuses[prev] + } + + // if we are in a transition, check to see whether our installer pod completed + if currNodeState.TargetRevision > currNodeState.CurrentRevision { + if err := c.ensureInstallerPod(currNodeState.NodeName, operatorSpec, currNodeState.TargetRevision); err != nil { + c.eventRecorder.Warningf("InstallerPodFailed", "Failed to create installer pod for revision %d on node %q: %v", + currNodeState.TargetRevision, currNodeState.NodeName, err) + return true, err + } + + pendingNewRevision := operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision + newCurrNodeState, installerPodFailed, err := c.newNodeStateForInstallInProgress(currNodeState, pendingNewRevision) + if err != nil { + return true, err + } + + // if we make a change to this status, we want to write it out to the API before we commence work on the next node. + // it's an extra write/read, but it makes the state debuggable from outside this process + if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { + klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) + newOperatorStatus, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions) + if updateError != nil { + return false, updateError + } else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision { + c.eventRecorder.Eventf("NodeCurrentRevisionChanged", "Updated node %q from revision %d to %d", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.CurrentRevision) + } + if err := c.updateRevisionStatus(newOperatorStatus); err != nil { + klog.Errorf("error updating revision status configmap: %v", err) + } + return false, nil + } else { + klog.V(2).Infof("%q is in transition to %d, but has not made progress", currNodeState.NodeName, currNodeState.TargetRevision) + } + + // We want to retry the installer pod by deleting and then rekicking. Also we don't set LastFailedRevision. 
+ if !installerPodFailed { + break + } + klog.Infof("Retrying %q for revision %d because it failed", currNodeState.NodeName, currNodeState.TargetRevision) + installerPodName := getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName) + if err := c.podsGetter.Pods(c.targetNamespace).Delete(installerPodName, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + return true, err + } + } + + revisionToStart := c.getRevisionToStart(currNodeState, prevNodeState, operatorStatus) + if revisionToStart == 0 { + klog.V(4).Infof("%q does not need update", currNodeState.NodeName) + continue + } + klog.Infof("%q needs new revision %d", currNodeState.NodeName, revisionToStart) + + newCurrNodeState := currNodeState.DeepCopy() + newCurrNodeState.TargetRevision = revisionToStart + newCurrNodeState.LastFailedRevisionErrors = nil + + // if we make a change to this status, we want to write it out to the API before we commence work on the next node. + // it's an extra write/read, but it makes the state debuggable from outside this process + if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { + klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) + if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + return false, updateError + } else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 { + c.eventRecorder.Eventf("NodeTargetRevisionChanged", "Updating node %q from revision %d to %d", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.TargetRevision) + } + + return false, nil + } + break + } + + return false, nil +} + +func (c *InstallerController) updateRevisionStatus(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + failedRevisions := make(map[int32]struct{}) + currentRevisions := make(map[int32]struct{}) + for _, nodeState := range operatorStatus.NodeStatuses { + failedRevisions[nodeState.LastFailedRevision] = struct{}{} + currentRevisions[nodeState.CurrentRevision] = struct{}{} + } + delete(failedRevisions, 0) + + // If all current revisions point to the same revision, then mark it successful + if len(currentRevisions) == 1 { + err := c.updateConfigMapForRevision(currentRevisions, string(corev1.PodSucceeded)) + if err != nil { + return err + } + } + return c.updateConfigMapForRevision(failedRevisions, string(corev1.PodFailed)) +} + +func (c *InstallerController) updateConfigMapForRevision(currentRevisions map[int32]struct{}, status string) error { + for currentRevision := range currentRevisions { + statusConfigMap, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(statusConfigMapNameForRevision(currentRevision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.Infof("%s configmap not found, skipping update revision status", statusConfigMapNameForRevision(currentRevision)) + continue + } + if err != nil { + return err + } + statusConfigMap.Data["status"] = status + _, _, err = resourceapply.ApplyConfigMap(c.configMapsGetter, c.eventRecorder, statusConfigMap) + if err != nil { + return err + } + } + return nil +} + +func setNodeStatusFn(status *operatorv1.NodeStatus) v1helpers.UpdateStaticPodStatusFunc { + return func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + for i := range operatorStatus.NodeStatuses { + if operatorStatus.NodeStatuses[i].NodeName == 
status.NodeName { + operatorStatus.NodeStatuses[i] = *status + break + } + } + return nil + } +} + +// setAvailableProgressingConditions sets the Available and Progressing conditions +func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1.StaticPodOperatorStatus) error { + // Available means that we have at least one pod at the latest level + numAvailable := 0 + numAtLatestRevision := 0 + numProgressing := 0 + counts := map[int32]int{} + failingCount := map[int32]int{} + failing := map[int32][]string{} + for _, currNodeStatus := range newStatus.NodeStatuses { + counts[currNodeStatus.CurrentRevision] = counts[currNodeStatus.CurrentRevision] + 1 + if currNodeStatus.CurrentRevision != 0 { + numAvailable++ + } + + // keep track of failures so that we can report failing status + if currNodeStatus.LastFailedRevision != 0 { + failingCount[currNodeStatus.LastFailedRevision] = failingCount[currNodeStatus.LastFailedRevision] + 1 + failing[currNodeStatus.LastFailedRevision] = append(failing[currNodeStatus.LastFailedRevision], currNodeStatus.LastFailedRevisionErrors...) + } + + if newStatus.LatestAvailableRevision == currNodeStatus.CurrentRevision { + numAtLatestRevision += 1 + } else { + numProgressing += 1 + } + } + + revisionStrings := []string{} + for _, revision := range Int32KeySet(counts).List() { + count := counts[revision] + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes are at revision %d", count, revision)) + } + // if we are progressing and no nodes have achieved that level, we should indicate + if numProgressing > 0 && counts[newStatus.LatestAvailableRevision] == 0 { + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes have achieved new revision %d", 0, newStatus.LatestAvailableRevision)) + } + revisionDescription := strings.Join(revisionStrings, "; ") + + if numAvailable > 0 { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeAvailable, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("%d nodes are active; %s", numAvailable, revisionDescription), + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeAvailable, + Status: operatorv1.ConditionFalse, + Reason: "ZeroNodesActive", + Message: fmt.Sprintf("%d nodes are active; %s", numAvailable, revisionDescription), + }) + } + + // Progressing means that the any node is not at the latest available revision + if numProgressing > 0 { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeProgressing, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("%s", revisionDescription), + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeProgressing, + Status: operatorv1.ConditionFalse, + Reason: "AllNodesAtLatestRevision", + Message: fmt.Sprintf("%s", revisionDescription), + }) + } + + if len(failing) > 0 { + failingStrings := []string{} + for _, failingRevision := range Int32KeySet(failing).List() { + errorStrings := failing[failingRevision] + failingStrings = append(failingStrings, fmt.Sprintf("%d nodes are failing on revision %d:\n%v", failingCount[failingRevision], failingRevision, strings.Join(errorStrings, "\n"))) + } + failingDescription := strings.Join(failingStrings, "; ") + + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + 
Type: nodeInstallerDegraded, + Status: operatorv1.ConditionTrue, + Reason: "InstallerPodFailed", + Message: failingDescription, + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: nodeInstallerDegraded, + Status: operatorv1.ConditionFalse, + }) + } + + return nil +} + +// newNodeStateForInstallInProgress returns the new NodeState, whether it was killed by OOM or an error +func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *operatorv1.NodeStatus, newRevisionPending bool) (status *operatorv1.NodeStatus, installerPodFailed bool, err error) { + ret := currNodeState.DeepCopy() + installerPod, err := c.podsGetter.Pods(c.targetNamespace).Get(getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = currNodeState.CurrentRevision + ret.LastFailedRevisionErrors = []string{err.Error()} + return ret, false, nil + } + if err != nil { + return nil, false, err + } + + failed := false + errors := []string{} + + switch installerPod.Status.Phase { + case corev1.PodSucceeded: + if newRevisionPending { + // stop early, don't wait for ready static pod because a new revision is waiting + failed = true + errors = append(errors, "static pod has been installed, but is not ready while new revision is pending") + break + } + + state, revision, failedErrors, err := c.getStaticPodState(currNodeState.NodeName) + if err != nil { + return nil, false, err + } + + if revision != strconv.Itoa(int(currNodeState.TargetRevision)) { + // new updated pod to be launched + break + } + + switch state { + case staticPodStateFailed: + failed = true + errors = failedErrors + + case staticPodStateReady: + ret.CurrentRevision = currNodeState.TargetRevision + ret.TargetRevision = 0 + ret.LastFailedRevision = 0 + ret.LastFailedRevisionErrors = nil + return ret, false, nil + } + + case corev1.PodFailed: + failed = true + for _, containerStatus := range installerPod.Status.ContainerStatuses { + if containerStatus.State.Terminated != nil && len(containerStatus.State.Terminated.Message) > 0 { + errors = append(errors, fmt.Sprintf("%s: %s", containerStatus.Name, containerStatus.State.Terminated.Message)) + c.eventRecorder.Warningf("InstallerPodFailed", "installer errors: %v", strings.Join(errors, "\n")) + // do not set LastFailedRevision + return currNodeState, true, nil + } + } + } + + if failed { + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = 0 + if len(errors) == 0 { + errors = append(errors, "no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name) + } + ret.LastFailedRevisionErrors = errors + return ret, false, nil + } + + return ret, false, nil +} + +// getRevisionToStart returns the revision we need to start or zero if none +func (c *InstallerController) getRevisionToStart(currNodeState, prevNodeState *operatorv1.NodeStatus, operatorStatus *operatorv1.StaticPodOperatorStatus) int32 { + if prevNodeState == nil { + currentAtLatest := currNodeState.CurrentRevision == operatorStatus.LatestAvailableRevision + failedAtLatest := currNodeState.LastFailedRevision == operatorStatus.LatestAvailableRevision + if !currentAtLatest && !failedAtLatest { + return operatorStatus.LatestAvailableRevision + } + return 0 + } + + prevFinished := prevNodeState.TargetRevision == 0 + prevInTransition := prevNodeState.CurrentRevision != 
prevNodeState.TargetRevision + if prevInTransition && !prevFinished { + return 0 + } + + prevAhead := prevNodeState.CurrentRevision > currNodeState.CurrentRevision + failedAtPrev := currNodeState.LastFailedRevision == prevNodeState.CurrentRevision + if prevAhead && !failedAtPrev { + return prevNodeState.CurrentRevision + } + + return 0 +} + +func getInstallerPodName(revision int32, nodeName string) string { + return fmt.Sprintf("installer-%d-%s", revision, nodeName) +} + +// ensureInstallerPod creates the installer pod with the secrets required to if it does not exist already +func (c *InstallerController) ensureInstallerPod(nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error { + pod := resourceread.ReadPodV1OrDie(bindata.MustAsset(filepath.Join(manifestDir, manifestInstallerPodPath))) + + pod.Namespace = c.targetNamespace + pod.Name = getInstallerPodName(revision, nodeName) + pod.Spec.NodeName = nodeName + pod.Spec.Containers[0].Image = c.installerPodImageFn() + pod.Spec.Containers[0].Command = c.command + + ownerRefs, err := c.ownerRefsFn(revision) + if err != nil { + return fmt.Errorf("unable to set installer pod ownerrefs: %+v", err) + } + pod.OwnerReferences = ownerRefs + + if c.configMaps[0].Optional { + return fmt.Errorf("pod configmap %s is required, cannot be optional", c.configMaps[0].Name) + } + + args := []string{ + fmt.Sprintf("-v=%d", loglevel.LogLevelToKlog(operatorSpec.LogLevel)), + fmt.Sprintf("--revision=%d", revision), + fmt.Sprintf("--namespace=%s", pod.Namespace), + fmt.Sprintf("--pod=%s", c.configMaps[0].Name), + fmt.Sprintf("--resource-dir=%s", hostResourceDirDir), + fmt.Sprintf("--pod-manifest-dir=%s", hostPodManifestDir), + } + for _, cm := range c.configMaps { + if cm.Optional { + args = append(args, fmt.Sprintf("--optional-configmaps=%s", cm.Name)) + } else { + args = append(args, fmt.Sprintf("--configmaps=%s", cm.Name)) + } + } + for _, s := range c.secrets { + if s.Optional { + args = append(args, fmt.Sprintf("--optional-secrets=%s", s.Name)) + } else { + args = append(args, fmt.Sprintf("--secrets=%s", s.Name)) + } + } + if len(c.certDir) > 0 { + args = append(args, fmt.Sprintf("--cert-dir=%s", filepath.Join(hostResourceDirDir, c.certDir))) + for _, cm := range c.certConfigMaps { + if cm.Optional { + args = append(args, fmt.Sprintf("--optional-cert-configmaps=%s", cm.Name)) + } else { + args = append(args, fmt.Sprintf("--cert-configmaps=%s", cm.Name)) + } + } + for _, s := range c.certSecrets { + if s.Optional { + args = append(args, fmt.Sprintf("--optional-cert-secrets=%s", s.Name)) + } else { + args = append(args, fmt.Sprintf("--cert-secrets=%s", s.Name)) + } + } + } + + pod.Spec.Containers[0].Args = args + + // Some owners need to change aspects of the pod. 
Things like arguments for instance + for _, fn := range c.installerPodMutationFns { + if err := fn(pod, nodeName, operatorSpec, revision); err != nil { + return err + } + } + + _, _, err = resourceapply.ApplyPod(c.podsGetter, c.eventRecorder, pod) + return err +} + +func (c *InstallerController) setOwnerRefs(revision int32) ([]metav1.OwnerReference, error) { + ownerReferences := []metav1.OwnerReference{} + statusConfigMap, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(fmt.Sprintf("revision-status-%d", revision), metav1.GetOptions{}) + if err == nil { + ownerReferences = append(ownerReferences, metav1.OwnerReference{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: statusConfigMap.Name, + UID: statusConfigMap.UID, + }) + } + return ownerReferences, err +} + +func getInstallerPodImageFromEnv() string { + return os.Getenv("OPERATOR_IMAGE") +} + +// ensureCerts makes sure that our certs are ready or it will return an error to trigger a requeue so that we try again +func (c InstallerController) ensureCerts() error { + missing := []string{} + for _, cm := range c.certConfigMaps { + if cm.Optional { + continue + } + _, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(cm.Name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing = append(missing, "configmaps/"+cm.Name) + continue + } + return err + } + for _, s := range c.certSecrets { + if s.Optional { + continue + } + _, err := c.secretsGetter.Secrets(c.targetNamespace).Get(s.Name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing = append(missing, "secrets/"+s.Name) + continue + } + return err + } + + if len(missing) == 0 { + return nil + } + + c.eventRecorder.Warningf("RequiredCertsMissing", strings.Join(missing, ",")) + + return fmt.Errorf("required certs missing: %v", strings.Join(missing, ",")) +} + +func (c InstallerController) sync() error { + operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + operatorStatus := originalOperatorStatus.DeepCopy() + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + err = c.ensureCerts() + + // Only manage installation pods when all required certs are present. + if err == nil { + requeue, syncErr := c.manageInstallationPods(operatorSpec, operatorStatus, resourceVersion) + if requeue && syncErr == nil { + return fmt.Errorf("synthetic requeue request") + } + err = syncErr + } + + // Update failing condition + // If required certs are missing, this will report degraded as we can't create installer pods because of this pre-condition. + cond := operatorv1.OperatorCondition{ + Type: operatorStatusInstallerControllerDegraded, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. 
+func (c *InstallerController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting InstallerController") + defer klog.Infof("Shutting down InstallerController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *InstallerController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *InstallerController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *InstallerController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + } +} + +func mirrorPodNameForNode(staticPodName, nodeName string) string { + return staticPodName + "-" + nodeName +} + +func statusConfigMapNameForRevision(revision int32) string { + return fmt.Sprintf("%s-%d", statusConfigMapName, revision) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go new file mode 100644 index 0000000000..87256fe20a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go @@ -0,0 +1,187 @@ +package installer + +import ( + "reflect" + "sort" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]sets.Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) { + for _, item := range items { + s[item] = sets.Empty{} + } +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. 
+func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml new file mode 100644 index 0000000000..c8453c002a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: installer +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: installer + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + resources: + requests: + memory: 100M + limits: + memory: 100M + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go new file mode 100644 index 0000000000..70489277cd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go @@ -0,0 +1,314 @@ +// Code generated by go-bindata. +// sources: +// pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml +// pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml +// pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml +// DO NOT EDIT! 
+ +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring`) + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + # TODO this should be a clusterrole + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch`) + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml = []byte(`apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: monitor + namespace: {{ .TargetNamespace }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + serverName: apiserver.{{ .TargetNamespace }}.svc + jobLabel: component + namespaceSelector: + matchNames: + - {{ .TargetNamespace }} + selector: + matchLabels: + app: {{ .TargetNamespace }}`) + +func 
pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml": pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, + "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml": pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, + "pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml": pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "monitoring": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "prometheus-role-binding.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, map[string]*bintree{}}, + "prometheus-role.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, map[string]*bintree{}}, + "service-monitor.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml new file mode 100644 index 0000000000..2b3289912f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml new file mode 100644 index 0000000000..55957ab8e3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + # TODO this should be a clusterrole + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml new file mode 100644 index 0000000000..17f93e2c66 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml @@ -0,0 +1,26 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: monitor + namespace: {{ .TargetNamespace }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + serverName: apiserver.{{ .TargetNamespace }}.svc + jobLabel: component + namespaceSelector: + matchNames: + - {{ .TargetNamespace }} + selector: + matchLabels: + app: {{ .TargetNamespace }} \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go new file mode 100644 index 0000000000..77b59f1373 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go @@ -0,0 +1,195 @@ +package monitoring + +import ( + "fmt" + "path/filepath" + "time" + + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "k8s.io/klog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + rbaclisterv1 
"k8s.io/client-go/listers/rbac/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata" +) + +const ( + operatorStatusMonitoringResourceControllerDegraded = "MonitoringResourceControllerDegraded" + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/monitoring" +) + +type MonitoringResourceController struct { + targetNamespace string + serviceMonitorName string + + clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + operatorClient v1helpers.StaticPodOperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewMonitoringResourceController creates a new backing resource controller. +func NewMonitoringResourceController( + targetNamespace string, + serviceMonitorName string, + operatorClient v1helpers.StaticPodOperatorClient, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + dynamicClient dynamic.Interface, + eventRecorder events.Recorder, +) *MonitoringResourceController { + c := &MonitoringResourceController{ + targetNamespace: targetNamespace, + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("monitoring-resource-controller"), + serviceMonitorName: serviceMonitorName, + + clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), + cachesToSync: []cache.InformerSynced{ + kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced, + operatorClient.Informer().HasSynced, + }, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "MonitoringResourceController"), + kubeClient: kubeClient, + dynamicClient: dynamicClient, + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + // TODO: We need a dynamic informer here to observe changes to ServiceMonitor resource. 
+ kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced) + + return c +} + +func (c MonitoringResourceController) mustTemplateAsset(name string) ([]byte, error) { + config := struct { + TargetNamespace string + }{ + TargetNamespace: c.targetNamespace, + } + return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil +} + +func (c MonitoringResourceController) sync() error { + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + directResourceResults := resourceapply.ApplyDirectly(c.kubeClient, c.eventRecorder, c.mustTemplateAsset, + "manifests/prometheus-role.yaml", + "manifests/prometheus-role-binding.yaml", + ) + + errs := []error{} + for _, currResult := range directResourceResults { + if currResult.Error != nil { + errs = append(errs, fmt.Errorf("%q (%T): %v", currResult.File, currResult.Type, currResult.Error)) + } + } + + serviceMonitorBytes, err := c.mustTemplateAsset("manifests/service-monitor.yaml") + if err != nil { + errs = append(errs, fmt.Errorf("manifests/service-monitor.yaml: %v", err)) + } else { + _, serviceMonitorErr := resourceapply.ApplyServiceMonitor(c.dynamicClient, c.eventRecorder, serviceMonitorBytes) + errs = append(errs, serviceMonitorErr) + } + + err = v1helpers.NewMultiLineAggregate(errs) + + // NOTE: Failing to create the monitoring resources should not lead to operator failed state. + cond := operatorv1.OperatorCondition{ + Type: operatorStatusMonitoringResourceControllerDegraded, + Status: operatorv1.ConditionFalse, + } + if err != nil { + // this is not a typo. We will not have failing status on our operator for missing servicemonitor since servicemonitoring + // is not a prereq. + cond.Status = operatorv1.ConditionFalse + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +func (c *MonitoringResourceController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting MonitoringResourceController") + defer klog.Infof("Shutting down MonitoringResourceController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *MonitoringResourceController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *MonitoringResourceController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *MonitoringResourceController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go new file mode 100644 index 0000000000..85fb82965d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go @@ -0,0 +1,167 @@ +package node + +import ( + "fmt" + "time" + + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corelisterv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" +) + +const nodeControllerWorkQueueKey = "key" + +// NodeController watches for new master nodes and adds them to the node status list in the operator config status. +type NodeController struct { + operatorClient v1helpers.StaticPodOperatorClient + + nodeLister corelisterv1.NodeLister + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewNodeController creates a new node controller. 
+func NewNodeController( + operatorClient v1helpers.StaticPodOperatorClient, + kubeInformersClusterScoped informers.SharedInformerFactory, + eventRecorder events.Recorder, +) *NodeController { + c := &NodeController{ + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("node-controller"), + nodeLister: kubeInformersClusterScoped.Core().V1().Nodes().Lister(), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NodeController"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersClusterScoped.Core().V1().Nodes().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersClusterScoped.Core().V1().Nodes().Informer().HasSynced) + + return c +} + +func (c NodeController) sync() error { + _, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + operatorStatus := originalOperatorStatus.DeepCopy() + + selector, err := labels.NewRequirement("node-role.kubernetes.io/master", selection.Equals, []string{""}) + if err != nil { + panic(err) + } + nodes, err := c.nodeLister.List(labels.NewSelector().Add(*selector)) + if err != nil { + return err + } + + newTargetNodeStates := []operatorv1.NodeStatus{} + // remove entries for missing nodes + for i, nodeState := range originalOperatorStatus.NodeStatuses { + found := false + for _, node := range nodes { + if nodeState.NodeName == node.Name { + found = true + } + } + if found { + newTargetNodeStates = append(newTargetNodeStates, originalOperatorStatus.NodeStatuses[i]) + } else { + c.eventRecorder.Warningf("MasterNodeRemoved", "Observed removal of master node %s", nodeState.NodeName) + } + } + + // add entries for new nodes + for _, node := range nodes { + found := false + for _, nodeState := range originalOperatorStatus.NodeStatuses { + if nodeState.NodeName == node.Name { + found = true + } + } + if found { + continue + } + + c.eventRecorder.Eventf("MasterNodeObserved", "Observed new master node %s", node.Name) + newTargetNodeStates = append(newTargetNodeStates, operatorv1.NodeStatus{NodeName: node.Name}) + } + + operatorStatus.NodeStatuses = newTargetNodeStates + if !equality.Semantic.DeepEqual(originalOperatorStatus, operatorStatus) { + if _, updateError := c.operatorClient.UpdateStaticPodOperatorStatus(resourceVersion, operatorStatus); updateError != nil { + return updateError + } + } + + return nil +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *NodeController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting NodeController") + defer klog.Infof("Shutting down NodeController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *NodeController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *NodeController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *NodeController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(nodeControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(nodeControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(nodeControllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go new file mode 100644 index 0000000000..dce0cd0bd8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go @@ -0,0 +1,254 @@ +// Code generated by go-bindata. +// sources: +// pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml +// DO NOT EDIT! + +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml = []byte(`apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: pruner +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: pruner + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir +`) + +func pkgOperatorStaticpodControllerPruneManifestsPrunerPodYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, nil +} + +func pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerPruneManifestsPrunerPodYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads 
and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml": pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "prune": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "pruner-pod.yaml": {pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml new file mode 100644 index 0000000000..bae7d9c05b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: pruner +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: pruner + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go new file mode 100644 index 0000000000..fb4a9c3365 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go @@ -0,0 +1,363 @@ +package prune + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// PruneController is a controller that watches static installer pod revision statuses and spawns +// a pruner pod to delete old revision resources from disk +type PruneController struct { + targetNamespace, podResourcePrefix string + // command is the string to use for the pruning pod command + command []string + + // prunerPodImageFn returns the image name for the pruning pod + prunerPodImageFn func() string + // ownerRefsFn sets the ownerrefs on the pruner pod + ownerRefsFn func(revision int32) ([]metav1.OwnerReference, error) + + operatorClient v1helpers.StaticPodOperatorClient + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + podGetter corev1client.PodsGetter + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +const ( + pruneControllerWorkQueueKey = "key" + statusConfigMapName = "revision-status-" + defaultRevisionLimit = int32(5) +) + +// NewPruneController creates a new pruning controller +func NewPruneController( + targetNamespace string, + podResourcePrefix string, + command []string, + 
configMapGetter corev1client.ConfigMapsGetter, + secretGetter corev1client.SecretsGetter, + podGetter corev1client.PodsGetter, + operatorClient v1helpers.StaticPodOperatorClient, + eventRecorder events.Recorder, +) *PruneController { + c := &PruneController{ + targetNamespace: targetNamespace, + podResourcePrefix: podResourcePrefix, + command: command, + + operatorClient: operatorClient, + + configMapGetter: configMapGetter, + secretGetter: secretGetter, + podGetter: podGetter, + eventRecorder: eventRecorder.WithComponentSuffix("prune-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "PruneController"), + prunerPodImageFn: getPrunerPodImageFromEnv, + } + + c.ownerRefsFn = c.setOwnerRefs + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +func getRevisionLimits(operatorSpec *operatorv1.StaticPodOperatorSpec) (int32, int32) { + failedRevisionLimit := defaultRevisionLimit + succeededRevisionLimit := defaultRevisionLimit + if operatorSpec.FailedRevisionLimit != 0 { + failedRevisionLimit = operatorSpec.FailedRevisionLimit + } + if operatorSpec.SucceededRevisionLimit != 0 { + succeededRevisionLimit = operatorSpec.SucceededRevisionLimit + } + return failedRevisionLimit, succeededRevisionLimit +} + +func (c *PruneController) excludedRevisionHistory(operatorStatus *operatorv1.StaticPodOperatorStatus, failedRevisionLimit, succeededRevisionLimit int32) ([]int, error) { + var succeededRevisions, failedRevisions, inProgressRevisions, unknownStatusRevisions []int + + configMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return []int{}, err + } + for _, configMap := range configMaps.Items { + if !strings.HasPrefix(configMap.Name, statusConfigMapName) { + continue + } + + if revision, ok := configMap.Data["revision"]; ok { + revisionNumber, err := strconv.Atoi(revision) + if err != nil { + return []int{}, err + } + switch configMap.Data["status"] { + case string(corev1.PodSucceeded): + succeededRevisions = append(succeededRevisions, revisionNumber) + case string(corev1.PodFailed): + failedRevisions = append(failedRevisions, revisionNumber) + + case "InProgress": + // we always protect inprogress + inProgressRevisions = append(inProgressRevisions, revisionNumber) + + default: + // protect things you don't understand + unknownStatusRevisions = append(unknownStatusRevisions, revisionNumber) + c.eventRecorder.Event("UnknownRevisionStatus", fmt.Sprintf("unknown status for revision %d: %v", revisionNumber, configMap.Data["status"])) + } + } + } + + // Return early if nothing to prune + if len(succeededRevisions)+len(failedRevisions) == 0 { + klog.V(2).Info("no revision IDs currently eligible to prune") + return []int{}, nil + } + + // Get list of protected IDs + protectedSucceededRevisions := protectedRevisions(succeededRevisions, int(succeededRevisionLimit)) + protectedFailedRevisions := protectedRevisions(failedRevisions, int(failedRevisionLimit)) + + excludedRevisions := make([]int, 0, len(protectedSucceededRevisions)+len(protectedFailedRevisions)+len(inProgressRevisions)+len(unknownStatusRevisions)) + excludedRevisions = append(excludedRevisions, protectedSucceededRevisions...) + excludedRevisions = append(excludedRevisions, protectedFailedRevisions...) + excludedRevisions = append(excludedRevisions, inProgressRevisions...) 
+ excludedRevisions = append(excludedRevisions, unknownStatusRevisions...) + sort.Ints(excludedRevisions) + + // There should always be at least 1 excluded ID, otherwise we'll delete the current revision + if len(excludedRevisions) == 0 { + return []int{}, fmt.Errorf("need at least 1 excluded ID for revision pruning") + } + return excludedRevisions, nil +} + +func (c *PruneController) pruneDiskResources(operatorStatus *operatorv1.StaticPodOperatorStatus, excludedRevisions []int, maxEligibleRevision int) error { + // Run pruning pod on each node and pin it to that node + for _, nodeStatus := range operatorStatus.NodeStatuses { + // Use the highest value between CurrentRevision and LastFailedRevision + // Because CurrentRevision only updates on successful installs and we still prune on an unsuccessful install + if err := c.ensurePrunePod(nodeStatus.NodeName, maxEligibleRevision, excludedRevisions, max(nodeStatus.LastFailedRevision, nodeStatus.CurrentRevision)); err != nil { + return err + } + } + return nil +} + +func (c *PruneController) pruneAPIResources(excludedRevisions []int, maxEligibleRevision int) error { + protectedRevisions := sets.NewInt(excludedRevisions...) + statusConfigMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return err + } + for _, cm := range statusConfigMaps.Items { + if !strings.HasPrefix(cm.Name, statusConfigMapName) { + continue + } + + revision, err := strconv.Atoi(cm.Data["revision"]) + if err != nil { + return fmt.Errorf("unexpected error converting revision to int: %+v", err) + } + + if protectedRevisions.Has(revision) { + continue + } + if revision > maxEligibleRevision { + continue + } + if err := c.configMapGetter.ConfigMaps(c.targetNamespace).Delete(cm.Name, &metav1.DeleteOptions{}); err != nil { + return err + } + } + return nil +} + +func protectedRevisions(revisions []int, revisionLimit int) []int { + sort.Ints(revisions) + if len(revisions) == 0 { + return revisions + } + startKey := 0 + // We use -1 = unlimited revisions, so protect all. 
Limit shouldn't ever be literally 0 either + if revisionLimit > 0 && len(revisions) > revisionLimit { + startKey = len(revisions) - revisionLimit + } + return revisions[startKey:] +} + +func (c *PruneController) ensurePrunePod(nodeName string, maxEligibleRevision int, protectedRevisions []int, revision int32) error { + if revision == 0 { + return nil + } + pod := resourceread.ReadPodV1OrDie(bindata.MustAsset(filepath.Join("pkg/operator/staticpod/controller/prune", "manifests/pruner-pod.yaml"))) + + pod.Name = getPrunerPodName(nodeName, revision) + pod.Namespace = c.targetNamespace + pod.Spec.NodeName = nodeName + pod.Spec.Containers[0].Image = c.prunerPodImageFn() + pod.Spec.Containers[0].Command = c.command + pod.Spec.Containers[0].Args = append(pod.Spec.Containers[0].Args, + fmt.Sprintf("-v=%d", 4), + fmt.Sprintf("--max-eligible-revision=%d", maxEligibleRevision), + fmt.Sprintf("--protected-revisions=%s", revisionsToString(protectedRevisions)), + fmt.Sprintf("--resource-dir=%s", "/etc/kubernetes/static-pod-resources"), + fmt.Sprintf("--static-pod-name=%s", c.podResourcePrefix), + ) + + ownerRefs, err := c.ownerRefsFn(revision) + if err != nil { + return fmt.Errorf("unable to set pruner pod ownerrefs: %+v", err) + } + pod.OwnerReferences = ownerRefs + + _, _, err = resourceapply.ApplyPod(c.podGetter, c.eventRecorder, pod) + return err +} + +func (c *PruneController) setOwnerRefs(revision int32) ([]metav1.OwnerReference, error) { + ownerReferences := []metav1.OwnerReference{} + statusConfigMap, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(fmt.Sprintf("revision-status-%d", revision), metav1.GetOptions{}) + if err == nil { + ownerReferences = append(ownerReferences, metav1.OwnerReference{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: statusConfigMap.Name, + UID: statusConfigMap.UID, + }) + } + return ownerReferences, err +} + +func getPrunerPodName(nodeName string, revision int32) string { + return fmt.Sprintf("revision-pruner-%d-%s", revision, nodeName) +} + +func revisionsToString(revisions []int) string { + values := []string{} + for _, id := range revisions { + value := strconv.Itoa(id) + values = append(values, value) + } + return strings.Join(values, ",") +} + +func getPrunerPodImageFromEnv() string { + return os.Getenv("OPERATOR_IMAGE") +} + +func (c *PruneController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting PruneController") + defer klog.Infof("Shutting down PruneController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *PruneController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *PruneController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *PruneController) sync() error { + klog.V(5).Info("Syncing revision pruner") + operatorSpec, operatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + failedLimit, succeededLimit := getRevisionLimits(operatorSpec) + + excludedRevisions, err := c.excludedRevisionHistory(operatorStatus, failedLimit, succeededLimit) + if err != nil { + return err + } + // if no IDs are excluded, then there is nothing to prune + if len(excludedRevisions) == 0 { + klog.Info("No excluded revisions to prune, skipping") + return nil + } + + errs := []error{} + if diskErr := c.pruneDiskResources(operatorStatus, excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); diskErr != nil { + errs = append(errs, diskErr) + } + if apiErr := c.pruneAPIResources(excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); apiErr != nil { + errs = append(errs, apiErr) + } + return v1helpers.NewMultiLineAggregate(errs) +} + +// eventHandler queues the operator to check spec and status +func (c *PruneController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + } +} + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go new file mode 100644 index 0000000000..d08b3667da --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go @@ -0,0 +1,381 @@ +package revision + +import ( + "fmt" + "strconv" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const operatorStatusRevisionControllerDegraded = "RevisionControllerDegraded" +const revisionControllerWorkQueueKey = "key" + +// RevisionController is a controller that watches a set of configmaps and secrets and them against a revision snapshot +// of them. 
If the original resources changes, the revision counter is increased, stored in LatestAvailableRevision +// field of the operator config and new snapshots suffixed by the revision are created. +type RevisionController struct { + targetNamespace string + // configMaps is the list of configmaps that are directly copied.A different actor/controller modifies these. + // the first element should be the configmap that contains the static pod manifest + configMaps []RevisionResource + // secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these. + secrets []RevisionResource + + operatorClient v1helpers.StaticPodOperatorClient + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +type RevisionResource struct { + Name string + Optional bool +} + +// NewRevisionController create a new revision controller. +func NewRevisionController( + targetNamespace string, + configMaps []RevisionResource, + secrets []RevisionResource, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + operatorClient v1helpers.StaticPodOperatorClient, + configMapGetter corev1client.ConfigMapsGetter, + secretGetter corev1client.SecretsGetter, + eventRecorder events.Recorder, +) *RevisionController { + c := &RevisionController{ + targetNamespace: targetNamespace, + configMaps: configMaps, + secrets: secrets, + + operatorClient: operatorClient, + configMapGetter: configMapGetter, + secretGetter: secretGetter, + eventRecorder: eventRecorder.WithComponentSuffix("revision-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RevisionController"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().HasSynced) + + return c +} + +// createRevisionIfNeeded takes care of creating content for the static pods to use. +// returns whether or not requeue and if an error happened when updating status. Normally it updates status itself. +func (c RevisionController) createRevisionIfNeeded(operatorSpec *operatorv1.StaticPodOperatorSpec, operatorStatusOriginal *operatorv1.StaticPodOperatorStatus, resourceVersion string) (bool, error) { + operatorStatus := operatorStatusOriginal.DeepCopy() + + latestRevision := operatorStatus.LatestAvailableRevision + isLatestRevisionCurrent, reason := c.isLatestRevisionCurrent(latestRevision) + + // check to make sure that the latestRevision has the exact content we expect. 
No mutation here, so we start creating the next Revision only when it is required + if isLatestRevisionCurrent { + return false, nil + } + + nextRevision := latestRevision + 1 + c.eventRecorder.Eventf("RevisionTriggered", "new revision %d triggered by %q", nextRevision, reason) + if err := c.createNewRevision(nextRevision); err != nil { + cond := operatorv1.OperatorCondition{ + Type: "RevisionControllerDegraded", + Status: operatorv1.ConditionTrue, + Reason: "ContentCreationError", + Message: err.Error(), + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + c.eventRecorder.Warningf("RevisionCreateFailed", "Failed to create revision %d: %v", nextRevision, err.Error()) + return true, updateError + } + return true, nil + } + + cond := operatorv1.OperatorCondition{ + Type: "RevisionControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + if operatorStatus.LatestAvailableRevision == nextRevision { + klog.Warningf("revision %d is unexpectedly already the latest available revision. This is a possible race!", nextRevision) + return fmt.Errorf("conflicting latestAvailableRevision %d", operatorStatus.LatestAvailableRevision) + } + operatorStatus.LatestAvailableRevision = nextRevision + return nil + }); updateError != nil { + return true, updateError + } else if updated { + c.eventRecorder.Eventf("RevisionCreate", "Revision %d created because %s", operatorStatus.LatestAvailableRevision, reason) + } + + return false, nil +} + +func nameFor(name string, revision int32) string { + return fmt.Sprintf("%s-%d", name, revision) +} + +// isLatestRevisionCurrent returns whether the latest revision is up to date and an optional reason +func (c RevisionController) isLatestRevisionCurrent(revision int32) (bool, string) { + configChanges := []string{} + for _, cm := range c.configMaps { + requiredData := map[string]string{} + existingData := map[string]string{} + + required, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(cm.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !cm.Optional { + return false, err.Error() + } + existing, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(nameFor(cm.Name, revision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !cm.Optional { + return false, err.Error() + } + if required != nil { + requiredData = required.Data + } + if existing != nil { + existingData = existing.Data + } + if !equality.Semantic.DeepEqual(existingData, requiredData) { + if klog.V(4) { + klog.Infof("configmap %q changes for revision %d: %s", cm.Name, revision, resourceapply.JSONPatch(existing, required)) + } + configChanges = append(configChanges, fmt.Sprintf("configmap/%s has changed", cm.Name)) + } + } + + secretChanges := []string{} + for _, s := range c.secrets { + requiredData := map[string][]byte{} + existingData := map[string][]byte{} + + required, err := c.secretGetter.Secrets(c.targetNamespace).Get(s.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !s.Optional { + return false, err.Error() + } + existing, err := c.secretGetter.Secrets(c.targetNamespace).Get(nameFor(s.Name, revision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !s.Optional { + return false, err.Error() + } + if required != nil { + requiredData = required.Data + } + if existing != 
nil { + existingData = existing.Data + } + if !equality.Semantic.DeepEqual(existingData, requiredData) { + if klog.V(4) { + klog.Infof("secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatch(existing, required)) + } + secretChanges = append(secretChanges, fmt.Sprintf("secret/%s has changed", s.Name)) + } + } + + if len(secretChanges) > 0 || len(configChanges) > 0 { + return false, strings.Join(append(secretChanges, configChanges...), ",") + } + + return true, "" +} + +func (c RevisionController) createNewRevision(revision int32) error { + statusConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: c.targetNamespace, + Name: nameFor("revision-status", revision), + }, + Data: map[string]string{ + "status": "InProgress", + "revision": fmt.Sprintf("%d", revision), + }, + } + statusConfigMap, _, err := resourceapply.ApplyConfigMap(c.configMapGetter, c.eventRecorder, statusConfigMap) + if err != nil { + return err + } + ownerRefs := []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: statusConfigMap.Name, + UID: statusConfigMap.UID, + }} + + for _, cm := range c.configMaps { + obj, _, err := resourceapply.SyncConfigMap(c.configMapGetter, c.eventRecorder, c.targetNamespace, cm.Name, c.targetNamespace, nameFor(cm.Name, revision), ownerRefs) + if err != nil { + return err + } + if obj == nil && !cm.Optional { + return apierrors.NewNotFound(corev1.Resource("configmaps"), cm.Name) + } + } + for _, s := range c.secrets { + obj, _, err := resourceapply.SyncSecret(c.secretGetter, c.eventRecorder, c.targetNamespace, s.Name, c.targetNamespace, nameFor(s.Name, revision), ownerRefs) + if err != nil { + return err + } + if obj == nil && !s.Optional { + return apierrors.NewNotFound(corev1.Resource("secrets"), s.Name) + } + } + + return nil +} + +// getLatestAvailableRevision returns the latest known revision to the operator +// This is either the LatestAvailableRevision in the status or by checking revision status configmaps +func (c RevisionController) getLatestAvailableRevision(operatorStatus *operatorv1.StaticPodOperatorStatus) (int32, error) { + configMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return 0, err + } + var latestRevision int32 + for _, configMap := range configMaps.Items { + if !strings.HasPrefix(configMap.Name, "revision-status-") { + continue + } + if revision, ok := configMap.Data["revision"]; ok { + revisionNumber, err := strconv.Atoi(revision) + if err != nil { + return 0, err + } + if int32(revisionNumber) > latestRevision { + latestRevision = int32(revisionNumber) + } + } + } + // If there are no configmaps, then this should actually be revision 0 + return latestRevision, nil +} + +func (c RevisionController) sync() error { + operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorStateWithQuorum() + if err != nil { + return err + } + operatorStatus := originalOperatorStatus.DeepCopy() + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + // If the operator status has 0 as its latest available revision, this is either the first revision + // or possibly the operator resource was deleted and reset back to 0, which is not what we want so check configmaps + if operatorStatus.LatestAvailableRevision == 0 { + // Check to see if current revision is accurate and if not, search through configmaps for latest revision + latestRevision, err := 
c.getLatestAvailableRevision(operatorStatus) + if err != nil { + return err + } + if latestRevision != 0 { + // Then make sure that revision number is what's in the operator status + _, _, err = v1helpers.UpdateStaticPodStatus(c.operatorClient, func(status *operatorv1.StaticPodOperatorStatus) error { + status.LatestAvailableRevision = latestRevision + return nil + }) + // If we made a change return and requeue with the correct status + return fmt.Errorf("synthetic requeue request (err: %v)", err) + } + } + + requeue, syncErr := c.createRevisionIfNeeded(operatorSpec, operatorStatus, resourceVersion) + if requeue && syncErr == nil { + return fmt.Errorf("synthetic requeue request (err: %v)", syncErr) + } + err = syncErr + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: operatorStatusRevisionControllerDegraded, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *RevisionController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting RevisionController") + defer klog.Infof("Shutting down RevisionController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *RevisionController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *RevisionController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *RevisionController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go new file mode 100644 index 0000000000..34e46b98b2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go @@ -0,0 +1,217 @@ +package staticpodstate + +import ( + "fmt" + "strings" + "time" + + "k8s.io/klog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + 
"github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/status" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var ( + staticPodStateControllerDegraded = "StaticPodsDegraded" + staticPodStateControllerWorkQueueKey = "key" +) + +// StaticPodStateController is a controller that watches static pods and will produce a failing status if the +//// static pods start crashing for some reason. +type StaticPodStateController struct { + targetNamespace string + staticPodName string + operandName string + operatorNamespace string + + operatorClient v1helpers.StaticPodOperatorClient + configMapGetter corev1client.ConfigMapsGetter + podsGetter corev1client.PodsGetter + versionRecorder status.VersionGetter + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewStaticPodStateController creates a controller that watches static pods and will produce a failing status if the +// static pods start crashing for some reason. +func NewStaticPodStateController( + targetNamespace, staticPodName, operatorNamespace, operandName string, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + operatorClient v1helpers.StaticPodOperatorClient, + configMapGetter corev1client.ConfigMapsGetter, + podsGetter corev1client.PodsGetter, + versionRecorder status.VersionGetter, + eventRecorder events.Recorder, +) *StaticPodStateController { + c := &StaticPodStateController{ + targetNamespace: targetNamespace, + staticPodName: staticPodName, + operandName: operandName, + operatorNamespace: operatorNamespace, + + operatorClient: operatorClient, + configMapGetter: configMapGetter, + podsGetter: podsGetter, + versionRecorder: versionRecorder, + eventRecorder: eventRecorder.WithComponentSuffix("static-pod-state-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StaticPodStateController"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + + return c +} + +func (c *StaticPodStateController) sync() error { + operatorSpec, originalOperatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + errs := []error{} + failingErrorCount := 0 + images := sets.NewString() + for _, node := range originalOperatorStatus.NodeStatuses { + pod, err := c.podsGetter.Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, node.NodeName), metav1.GetOptions{}) + if err != nil { + errs = append(errs, err) + failingErrorCount++ + continue + } + images.Insert(pod.Spec.Containers[0].Image) + + for _, containerStatus := range pod.Status.ContainerStatuses { + if !containerStatus.Ready { + // When container is not ready, we can't determine whether the operator is failing or not and every container will become not + // ready when created, so do not blip the failing state for it. + // We will still reflect the container not ready state in error conditions, but we don't set the operator as failed. 
+ errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is not ready", node.NodeName, pod.Name, containerStatus.Name)) + } + if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason != "PodInitializing" { + errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is waiting: %q - %q", node.NodeName, pod.Name, containerStatus.Name, containerStatus.State.Waiting.Reason, containerStatus.State.Waiting.Message)) + failingErrorCount++ + } + if containerStatus.State.Terminated != nil { + // Containers can be terminated gracefully to trigger certificate reload, do not report these as failures. + errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is terminated: %q - %q", node.NodeName, pod.Name, containerStatus.Name, containerStatus.State.Terminated.Reason, containerStatus.State.Terminated.Message)) + // Only in case when the termination was caused by error. + if containerStatus.State.Terminated.ExitCode != 0 { + failingErrorCount++ + } + + } + } + } + + if len(images) == 0 { + c.eventRecorder.Warningf("MissingVersion", "no image found for operand pod") + } else if len(images) > 1 { + c.eventRecorder.Eventf("MultipleVersions", "multiple versions found, probably in transition: %v", strings.Join(images.List(), ",")) + } else { + c.versionRecorder.SetVersion( + c.operandName, + status.VersionForOperandFromEnv(), + ) + } + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: staticPodStateControllerDegraded, + Status: operatorv1.ConditionFalse, + } + // Failing errors + if failingErrorCount > 0 { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = v1helpers.NewMultiLineAggregate(errs).Error() + } + // Not failing errors + if failingErrorCount == 0 && len(errs) > 0 { + cond.Reason = "Error" + cond.Message = v1helpers.NewMultiLineAggregate(errs).Error() + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +func mirrorPodNameForNode(staticPodName, nodeName string) string { + return staticPodName + "-" + nodeName +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *StaticPodStateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting StaticPodStateController") + defer klog.Infof("Shutting down StaticPodStateController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
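+	// Running exactly one worker means sync() never overlaps with itself, so this controller's status updates stay serialized.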
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *StaticPodStateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *StaticPodStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *StaticPodStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go new file mode 100644 index 0000000000..f17b19871b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -0,0 +1,284 @@ +package staticpod + +import ( + "fmt" + + "github.com/openshift/library-go/pkg/operator/loglevel" + + "github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller" + + "k8s.io/apimachinery/pkg/util/errors" + + "github.com/openshift/library-go/pkg/operator/status" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/installer" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/node" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/prune" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +type staticPodOperatorControllerBuilder struct { + // clients and related + staticPodOperatorClient v1helpers.StaticPodOperatorClient + kubeClient kubernetes.Interface + kubeInformers v1helpers.KubeInformersForNamespaces + dynamicClient dynamic.Interface + eventRecorder events.Recorder + + // resource information + operandNamespace string + staticPodName string + revisionConfigMaps []revision.RevisionResource + revisionSecrets []revision.RevisionResource + + // cert information + certDir string + certConfigMaps []revision.RevisionResource + certSecrets []revision.RevisionResource + + // versioner information + versionRecorder status.VersionGetter + operatorNamespace string + operandName string + + // installer information + installCommand []string + + // pruning information + pruneCommand []string + // TODO de-dupe this. 
I think it's actually a directory name + staticPodPrefix string +} + +func NewBuilder( + staticPodOperatorClient v1helpers.StaticPodOperatorClient, + kubeClient kubernetes.Interface, + kubeInformers v1helpers.KubeInformersForNamespaces, +) Builder { + return &staticPodOperatorControllerBuilder{ + staticPodOperatorClient: staticPodOperatorClient, + kubeClient: kubeClient, + kubeInformers: kubeInformers, + } +} + +// Builder allows the caller to construct a set of static pod controllers in pieces +type Builder interface { + WithEvents(eventRecorder events.Recorder) Builder + WithServiceMonitor(dynamicClient dynamic.Interface) Builder + WithVersioning(operatorNamespace, operandName string, versionRecorder status.VersionGetter) Builder + WithResources(operandNamespace, staticPodName string, revisionConfigMaps, revisionSecrets []revision.RevisionResource) Builder + WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) Builder + WithInstaller(command []string) Builder + WithPruning(command []string, staticPodPrefix string) Builder + ToControllers() (*staticPodOperatorControllers, error) +} + +func (b *staticPodOperatorControllerBuilder) WithEvents(eventRecorder events.Recorder) Builder { + b.eventRecorder = eventRecorder + return b +} + +func (b *staticPodOperatorControllerBuilder) WithServiceMonitor(dynamicClient dynamic.Interface) Builder { + b.dynamicClient = dynamicClient + return b +} + +func (b *staticPodOperatorControllerBuilder) WithVersioning(operatorNamespace, operandName string, versionRecorder status.VersionGetter) Builder { + b.operatorNamespace = operatorNamespace + b.operandName = operandName + b.versionRecorder = versionRecorder + return b +} + +func (b *staticPodOperatorControllerBuilder) WithResources(operandNamespace, staticPodName string, revisionConfigMaps, revisionSecrets []revision.RevisionResource) Builder { + b.operandNamespace = operandNamespace + b.staticPodName = staticPodName + b.revisionConfigMaps = revisionConfigMaps + b.revisionSecrets = revisionSecrets + return b +} + +func (b *staticPodOperatorControllerBuilder) WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) Builder { + b.certDir = certDir + b.certConfigMaps = certConfigMaps + b.certSecrets = certSecrets + return b +} + +func (b *staticPodOperatorControllerBuilder) WithInstaller(command []string) Builder { + b.installCommand = command + return b +} + +func (b *staticPodOperatorControllerBuilder) WithPruning(command []string, staticPodPrefix string) Builder { + b.pruneCommand = command + b.staticPodPrefix = staticPodPrefix + return b +} + +func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperatorControllers, error) { + controllers := &staticPodOperatorControllers{} + + eventRecorder := b.eventRecorder + if eventRecorder == nil { + eventRecorder = events.NewLoggingEventRecorder("static-pod-operator-controller") + } + versionRecorder := b.versionRecorder + if versionRecorder == nil { + versionRecorder = status.NewVersionGetter() + } + configMapClient := v1helpers.CachedConfigMapGetter(b.kubeClient.CoreV1(), b.kubeInformers) + secretClient := v1helpers.CachedSecretGetter(b.kubeClient.CoreV1(), b.kubeInformers) + podClient := b.kubeClient.CoreV1() + operandInformers := b.kubeInformers.InformersFor(b.operandNamespace) + clusterInformers := b.kubeInformers.InformersFor("") + + if len(b.operandNamespace) > 0 { + controllers.revisionController = revision.NewRevisionController( + b.operandNamespace, + b.revisionConfigMaps, + 
b.revisionSecrets, + operandInformers, + b.staticPodOperatorClient, + configMapClient, + secretClient, + eventRecorder, + ) + } + + if len(b.installCommand) > 0 { + controllers.installerController = installer.NewInstallerController( + b.operandNamespace, + b.staticPodName, + b.revisionConfigMaps, + b.revisionSecrets, + b.installCommand, + operandInformers, + b.staticPodOperatorClient, + configMapClient, + secretClient, + podClient, + eventRecorder, + ).WithCerts( + b.certDir, + b.certConfigMaps, + b.certSecrets, + ) + } + + if len(b.operandName) > 0 { + // TODO add handling for operator configmap changes to get version-mapping changes + controllers.staticPodStateController = staticpodstate.NewStaticPodStateController( + b.operandNamespace, + b.staticPodName, + b.operatorNamespace, + b.operandName, + operandInformers, + b.staticPodOperatorClient, + configMapClient, + podClient, + versionRecorder, + eventRecorder, + ) + } + + if len(b.pruneCommand) > 0 { + controllers.pruneController = prune.NewPruneController( + b.operandNamespace, + b.staticPodPrefix, + b.pruneCommand, + configMapClient, + secretClient, + podClient, + b.staticPodOperatorClient, + eventRecorder, + ) + } + + controllers.nodeController = node.NewNodeController( + b.staticPodOperatorClient, + clusterInformers, + eventRecorder, + ) + + controllers.backingResourceController = backingresource.NewBackingResourceController( + b.operandNamespace, + b.staticPodOperatorClient, + operandInformers, + b.kubeClient, + eventRecorder, + ) + + if b.dynamicClient != nil { + controllers.monitoringResourceController = monitoring.NewMonitoringResourceController( + b.operandNamespace, + b.operandNamespace, + b.staticPodOperatorClient, + operandInformers, + b.kubeClient, + b.dynamicClient, + eventRecorder, + ) + } + + controllers.unsupportedConfigOverridesController = unsupportedconfigoverridescontroller.NewUnsupportedConfigOverridesController(b.staticPodOperatorClient, eventRecorder) + controllers.logLevelController = loglevel.NewClusterOperatorLoggingController(b.staticPodOperatorClient, eventRecorder) + + errs := []error{} + if controllers.revisionController == nil { + errs = append(errs, fmt.Errorf("missing revisionController; cannot proceed")) + } + if controllers.installerController == nil { + errs = append(errs, fmt.Errorf("missing installerController; cannot proceed")) + } + if controllers.staticPodStateController == nil { + eventRecorder.Warning("StaticPodStateControllerMissing", "not enough information provided, not all functionality is present") + } + if controllers.pruneController == nil { + eventRecorder.Warning("PruningControllerMissing", "not enough information provided, not all functionality is present") + } + if controllers.monitoringResourceController == nil { + eventRecorder.Warning("MonitoringResourceController", "not enough information provided, not all functionality is present") + } + + return controllers, errors.NewAggregate(errs) +} + +type staticPodOperatorControllers struct { + revisionController *revision.RevisionController + installerController *installer.InstallerController + staticPodStateController *staticpodstate.StaticPodStateController + pruneController *prune.PruneController + nodeController *node.NodeController + backingResourceController *backingresource.BackingResourceController + monitoringResourceController *monitoring.MonitoringResourceController + unsupportedConfigOverridesController *unsupportedconfigoverridescontroller.UnsupportedConfigOverridesController + logLevelController *loglevel.LogLevelController 
+} + +func (o *staticPodOperatorControllers) WithInstallerPodMutationFn(installerPodMutationFn installer.InstallerPodMutationFunc) *staticPodOperatorControllers { + o.installerController.WithInstallerPodMutationFn(installerPodMutationFn) + return o +} + +func (o *staticPodOperatorControllers) Run(stopCh <-chan struct{}) { + go o.revisionController.Run(1, stopCh) + go o.installerController.Run(1, stopCh) + go o.staticPodStateController.Run(1, stopCh) + go o.pruneController.Run(1, stopCh) + go o.nodeController.Run(1, stopCh) + go o.backingResourceController.Run(1, stopCh) + go o.monitoringResourceController.Run(1, stopCh) + go o.unsupportedConfigOverridesController.Run(1, stopCh) + go o.logLevelController.Run(1, stopCh) + + <-stopCh +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go new file mode 100644 index 0000000000..2738ba2b95 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go @@ -0,0 +1,359 @@ +package installerpod + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/library-go/pkg/operator/resource/retry" +) + +type InstallOptions struct { + // TODO replace with genericclioptions + KubeConfig string + KubeClient kubernetes.Interface + + Revision string + Namespace string + + PodConfigMapNamePrefix string + SecretNamePrefixes []string + OptionalSecretNamePrefixes []string + ConfigMapNamePrefixes []string + OptionalConfigMapNamePrefixes []string + + CertSecretNames []string + OptionalCertSecretNamePrefixes []string + CertConfigMapNamePrefixes []string + OptionalCertConfigMapNamePrefixes []string + + CertDir string + ResourceDir string + PodManifestDir string + + Timeout time.Duration + + PodMutationFns []PodMutationFunc +} + +// PodMutationFunc is a function that has a chance at changing the pod before it is created +type PodMutationFunc func(pod *corev1.Pod) error + +func NewInstallOptions() *InstallOptions { + return &InstallOptions{} +} + +func (o *InstallOptions) WithPodMutationFn(podMutationFn PodMutationFunc) *InstallOptions { + o.PodMutationFns = append(o.PodMutationFns, podMutationFn) + return o +} + +func NewInstaller() *cobra.Command { + o := NewInstallOptions() + + cmd := &cobra.Command{ + Use: "installer", + Short: "Install static pod and related resources", + Run: func(cmd *cobra.Command, args []string) { + klog.V(1).Info(cmd.Flags()) + klog.V(1).Info(spew.Sdump(o)) + + if err := o.Complete(); err != nil { + klog.Fatal(err) + } + if err := o.Validate(); err != nil { + klog.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.TODO(), o.Timeout) + defer cancel() + if err := o.Run(ctx); err != nil { + klog.Fatal(err) + } + }, + } + + o.AddFlags(cmd.Flags()) + + return cmd +} + +func (o *InstallOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty") + fs.StringVar(&o.Revision, "revision", o.Revision, "identifier 
for this particular installation instance. For example, a counter or a hash") + fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to retrieve all resources from and create the static pod in") + fs.StringVar(&o.PodConfigMapNamePrefix, "pod", o.PodConfigMapNamePrefix, "name of configmap that contains the pod to be created") + fs.StringSliceVar(&o.SecretNamePrefixes, "secrets", o.SecretNamePrefixes, "list of secret names to be included") + fs.StringSliceVar(&o.ConfigMapNamePrefixes, "configmaps", o.ConfigMapNamePrefixes, "list of configmaps to be included") + fs.StringSliceVar(&o.OptionalSecretNamePrefixes, "optional-secrets", o.OptionalSecretNamePrefixes, "list of optional secret names to be included") + fs.StringSliceVar(&o.OptionalConfigMapNamePrefixes, "optional-configmaps", o.OptionalConfigMapNamePrefixes, "list of optional configmaps to be included") + fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest") + fs.StringVar(&o.PodManifestDir, "pod-manifest-dir", o.PodManifestDir, "directory for the static pod manifest") + fs.DurationVar(&o.Timeout, "timeout-duration", 120*time.Second, "maximum time in seconds to wait for the copying to complete (default: 2m)") + + fs.StringSliceVar(&o.CertSecretNames, "cert-secrets", o.CertSecretNames, "list of secret names to be included") + fs.StringSliceVar(&o.CertConfigMapNamePrefixes, "cert-configmaps", o.CertConfigMapNamePrefixes, "list of configmaps to be included") + fs.StringSliceVar(&o.OptionalCertSecretNamePrefixes, "optional-cert-secrets", o.OptionalCertSecretNamePrefixes, "list of optional secret names to be included") + fs.StringSliceVar(&o.OptionalCertConfigMapNamePrefixes, "optional-cert-configmaps", o.OptionalCertConfigMapNamePrefixes, "list of optional configmaps to be included") + fs.StringVar(&o.CertDir, "cert-dir", o.CertDir, "directory for all certs") +} + +func (o *InstallOptions) Complete() error { + clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil) + if err != nil { + return err + } + + // Use protobuf to fetch configmaps and secrets and create pods. 
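+	// Protobuf is cheaper to serve and decode than JSON for the many configmap/secret reads this pod performs;
+	// JSON stays in AcceptContentTypes as a fallback for resources the server cannot encode as protobuf.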
+ protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + o.KubeClient, err = kubernetes.NewForConfig(protoConfig) + if err != nil { + return err + } + return nil +} + +func (o *InstallOptions) Validate() error { + if len(o.Revision) == 0 { + return fmt.Errorf("--revision is required") + } + if len(o.Namespace) == 0 { + return fmt.Errorf("--namespace is required") + } + if len(o.PodConfigMapNamePrefix) == 0 { + return fmt.Errorf("--pod is required") + } + if len(o.ConfigMapNamePrefixes) == 0 { + return fmt.Errorf("--configmaps is required") + } + if o.Timeout == 0 { + return fmt.Errorf("--timeout-duration cannot be 0") + } + + if o.KubeClient == nil { + return fmt.Errorf("missing client") + } + + return nil +} + +func (o *InstallOptions) nameFor(prefix string) string { + return fmt.Sprintf("%s-%s", prefix, o.Revision) +} + +func (o *InstallOptions) prefixFor(name string) string { + return name[0 : len(name)-len(fmt.Sprintf("-%s", o.Revision))] +} + +func (o *InstallOptions) copySecretsAndConfigMaps(ctx context.Context, resourceDir string, + secretNames, optionalSecretNames, configNames, optionalConfigNames sets.String, prefixed bool) error { + klog.Infof("Creating target resource directory %q ...", resourceDir) + if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) { + return err + } + + // Gather secrets. If we get API server error, retry getting until we hit the timeout. + // Retrying will prevent temporary API server blips or networking issues. + // We return when all "required" secrets are gathered, optional secrets are not checked. + klog.Infof("Getting secrets ...") + secrets := []*corev1.Secret{} + for _, name := range append(secretNames.List(), optionalSecretNames.List()...) { + secret, err := o.getSecretWithRetry(ctx, name, optionalSecretNames.Has(name)) + if err != nil { + return err + } + // secret is nil means the secret was optional and we failed to get it. + if secret != nil { + secrets = append(secrets, secret) + } + } + + klog.Infof("Getting config maps ...") + configs := []*corev1.ConfigMap{} + for _, name := range append(configNames.List(), optionalConfigNames.List()...) { + config, err := o.getConfigMapWithRetry(ctx, name, optionalConfigNames.Has(name)) + if err != nil { + return err + } + // config is nil means the config was optional and we failed to get it. 
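+		// Required configmaps that could not be fetched have already returned an error above, aborting the copy.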
+ if config != nil { + configs = append(configs, config) + } + } + + for _, secret := range secrets { + secretBaseName := secret.Name + if prefixed { + secretBaseName = o.prefixFor(secret.Name) + } + contentDir := path.Join(resourceDir, "secrets", secretBaseName) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil { + return err + } + for filename, content := range secret.Data { + // TODO fix permissions + klog.Infof("Writing secret manifest %q ...", path.Join(contentDir, filename)) + if err := ioutil.WriteFile(path.Join(contentDir, filename), content, 0644); err != nil { + return err + } + } + } + for _, configmap := range configs { + configMapBaseName := configmap.Name + if prefixed { + configMapBaseName = o.prefixFor(configmap.Name) + } + contentDir := path.Join(resourceDir, "configmaps", configMapBaseName) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil { + return err + } + for filename, content := range configmap.Data { + klog.Infof("Writing config file %q ...", path.Join(contentDir, filename)) + if err := ioutil.WriteFile(path.Join(contentDir, filename), []byte(content), 0644); err != nil { + return err + } + } + } + + return nil +} + +func (o *InstallOptions) copyContent(ctx context.Context) error { + resourceDir := path.Join(o.ResourceDir, o.nameFor(o.PodConfigMapNamePrefix)) + klog.Infof("Creating target resource directory %q ...", resourceDir) + if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) { + return err + } + + secretPrefixes := sets.NewString() + optionalSecretPrefixes := sets.NewString() + configPrefixes := sets.NewString() + optionalConfigPrefixes := sets.NewString() + for _, prefix := range o.SecretNamePrefixes { + secretPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.OptionalSecretNamePrefixes { + optionalSecretPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.ConfigMapNamePrefixes { + configPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.OptionalConfigMapNamePrefixes { + optionalConfigPrefixes.Insert(o.nameFor(prefix)) + } + if err := o.copySecretsAndConfigMaps(ctx, resourceDir, secretPrefixes, optionalSecretPrefixes, configPrefixes, optionalConfigPrefixes, true); err != nil { + return err + } + + // Copy the current state of the certs as we see them. 
This primes us once and allows a kube-apiserver to start once + if len(o.CertDir) > 0 { + if err := o.copySecretsAndConfigMaps(ctx, o.CertDir, + sets.NewString(o.CertSecretNames...), + sets.NewString(o.OptionalCertSecretNamePrefixes...), + sets.NewString(o.CertConfigMapNamePrefixes...), + sets.NewString(o.OptionalCertConfigMapNamePrefixes...), + false, + ); err != nil { + return err + } + } + + // Gather pod yaml from config map + var podContent string + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + klog.Infof("Getting pod configmaps/%s -n %s", o.nameFor(o.PodConfigMapNamePrefix), o.Namespace) + podConfigMap, err := o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(o.nameFor(o.PodConfigMapNamePrefix), metav1.GetOptions{}) + if err != nil { + return false, err + } + podData, exists := podConfigMap.Data["pod.yaml"] + if !exists { + return true, fmt.Errorf("required 'pod.yaml' key does not exist in configmap") + } + podContent = strings.Replace(podData, "REVISION", o.Revision, -1) + return true, nil + }) + if err != nil { + return err + } + + // Write secrets, config maps and pod to disk + // This does not need timeout, instead we should fail hard when we are not able to write. + + podFileName := o.PodConfigMapNamePrefix + ".yaml" + klog.Infof("Writing pod manifest %q ...", path.Join(resourceDir, podFileName)) + if err := ioutil.WriteFile(path.Join(resourceDir, podFileName), []byte(podContent), 0644); err != nil { + return err + } + + // copy static pod + klog.Infof("Creating directory for static pod manifest %q ...", o.PodManifestDir) + if err := os.MkdirAll(o.PodManifestDir, 0755); err != nil { + return err + } + + for _, fn := range o.PodMutationFns { + klog.V(2).Infof("Customizing static pod ...") + pod := resourceread.ReadPodV1OrDie([]byte(podContent)) + if err := fn(pod); err != nil { + return err + } + podContent = resourceread.WritePodV1OrDie(pod) + } + + klog.Infof("Writing static pod manifest %q ...\n%s", path.Join(o.PodManifestDir, podFileName), podContent) + if err := ioutil.WriteFile(path.Join(o.PodManifestDir, podFileName), []byte(podContent), 0644); err != nil { + return err + } + + return nil +} + +func (o *InstallOptions) Run(ctx context.Context) error { + var eventTarget *corev1.ObjectReference + + err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { + var clientErr error + eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(o.KubeClient, o.Namespace, nil) + if clientErr != nil { + return false, clientErr + } + return true, nil + }) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + + recorder := events.NewRecorder(o.KubeClient.CoreV1().Events(o.Namespace), "static-pod-installer", eventTarget) + if err := o.copyContent(ctx); err != nil { + recorder.Warningf("StaticPodInstallerFailed", "Installing revision %s: %v", o.Revision, err) + return fmt.Errorf("failed to copy: %v", err) + } + + recorder.Eventf("StaticPodInstallerCompleted", "Successfully installed revision %s", o.Revision) + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go new file mode 100644 index 0000000000..390763b883 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go @@ -0,0 +1,67 @@ +package installerpod + +import ( + "golang.org/x/net/context" + "k8s.io/klog" + + 
"k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/library-go/pkg/operator/resource/retry" +) + +// getSecretWithRetry will attempt to get the secret from the API server and retry on any connection errors until +// the context is not done or secret is returned or a HTTP client error is returned. +// In case the optional flag is set, the 404 error is not reported and a nil object is returned instead. +func (o *InstallOptions) getSecretWithRetry(ctx context.Context, name string, isOptional bool) (*v1.Secret, error) { + var secret *v1.Secret + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + var clientErr error + secret, clientErr = o.KubeClient.CoreV1().Secrets(o.Namespace).Get(name, metav1.GetOptions{}) + if clientErr != nil { + klog.Infof("Failed to get secret %s/%s: %v", o.Namespace, name, clientErr) + return false, clientErr + } + return true, nil + }) + + switch { + case err == nil: + klog.Infof("Got secret %s/%s", o.Namespace, name) + return secret, nil + case errors.IsNotFound(err) && isOptional: + return nil, nil + default: + return nil, err + } + +} + +// getConfigMapWithRetry will attempt to get the configMap from the API server and retry on any connection errors until +// the context is not done or configMap is returned or a HTTP client error is returned. +// In case the optional flag is set, the 404 error is not reported and a nil object is returned instead. +func (o *InstallOptions) getConfigMapWithRetry(ctx context.Context, name string, isOptional bool) (*v1.ConfigMap, error) { + var config *v1.ConfigMap + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + var clientErr error + config, clientErr = o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(name, metav1.GetOptions{}) + if clientErr != nil { + klog.Infof("Failed to get config map %s/%s: %v", o.Namespace, name, clientErr) + return false, clientErr + } + return true, nil + }) + + switch { + case err == nil: + klog.Infof("Got configMap %s/%s", o.Namespace, name) + return config, nil + case errors.IsNotFound(err) && isOptional: + return nil, nil + default: + return nil, err + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go new file mode 100644 index 0000000000..2db8cda746 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go @@ -0,0 +1,116 @@ +package prune + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/sets" +) + +type PruneOptions struct { + MaxEligibleRevision int + ProtectedRevisions []int + + ResourceDir string + StaticPodName string +} + +func NewPruneOptions() *PruneOptions { + return &PruneOptions{} +} + +func NewPrune() *cobra.Command { + o := NewPruneOptions() + + cmd := &cobra.Command{ + Use: "prune", + Short: "Prune static pod installer revisions", + Run: func(cmd *cobra.Command, args []string) { + klog.V(1).Info(cmd.Flags()) + klog.V(1).Info(spew.Sdump(o)) + + if err := o.Validate(); err != nil { + klog.Fatal(err) + } + if err := o.Run(); err != nil { + klog.Fatal(err) + } + }, + } + + o.AddFlags(cmd.Flags()) + + return cmd +} + +func (o *PruneOptions) AddFlags(fs *pflag.FlagSet) { + fs.IntVar(&o.MaxEligibleRevision, 
"max-eligible-revision", o.MaxEligibleRevision, "highest revision ID to be eligible for pruning") + fs.IntSliceVar(&o.ProtectedRevisions, "protected-revisions", o.ProtectedRevisions, "list of revision IDs to preserve (not delete)") + fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest") + fs.StringVar(&o.StaticPodName, "static-pod-name", o.StaticPodName, "name of the static pod") +} + +func (o *PruneOptions) Validate() error { + if len(o.ResourceDir) == 0 { + return fmt.Errorf("--resource-dir is required") + } + if o.MaxEligibleRevision == 0 { + return fmt.Errorf("--max-eligible-id is required") + } + if len(o.StaticPodName) == 0 { + return fmt.Errorf("--static-pod-name is required") + } + + return nil +} + +func (o *PruneOptions) Run() error { + protectedIDs := sets.NewInt(o.ProtectedRevisions...) + + files, err := ioutil.ReadDir(o.ResourceDir) + if err != nil { + return err + } + + for _, file := range files { + // If the file is not a resource directory... + if !file.IsDir() { + continue + } + // And doesn't match our static pod prefix... + if !strings.HasPrefix(file.Name(), o.StaticPodName) { + continue + } + + // Split file name to get just the integer revision ID + fileSplit := strings.Split(file.Name(), o.StaticPodName+"-") + revisionID, err := strconv.Atoi(fileSplit[len(fileSplit)-1]) + if err != nil { + return err + } + + // And is not protected... + if protected := protectedIDs.Has(revisionID); protected { + continue + } + // And is less than or equal to the maxEligibleRevisionID + if revisionID > o.MaxEligibleRevision { + continue + } + + err = os.RemoveAll(path.Join(o.ResourceDir, file.Name())) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go new file mode 100644 index 0000000000..60979a379f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go @@ -0,0 +1,134 @@ +package status + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// unionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +func unionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, false, allConditions...) +} + +// unionInertialCondition returns a single cluster operator condition that is the union of multiple operator conditions, +// but resists returning a condition with a status opposite the defaultConditionStatus. +func unionInertialCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, true, allConditions...) +} + +// internalUnionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +// +// defaultConditionStatus indicates whether you want to merge all Falses or merge all Trues. For instance, Failures merge +// on true, but Available merges on false. Thing of it like an anti-default. 
+// +// If hasInertia, then resist returning a condition with a status opposite the defaultConditionStatus. +func internalUnionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, hasInertia bool, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + var oppositeConditionStatus operatorv1.ConditionStatus + if defaultConditionStatus == operatorv1.ConditionTrue { + oppositeConditionStatus = operatorv1.ConditionFalse + } else { + oppositeConditionStatus = operatorv1.ConditionTrue + } + + interestingConditions := []operatorv1.OperatorCondition{} + badConditions := []operatorv1.OperatorCondition{} + for _, condition := range allConditions { + if strings.HasSuffix(condition.Type, conditionType) { + interestingConditions = append(interestingConditions, condition) + + if condition.Status == oppositeConditionStatus { + badConditions = append(badConditions, condition) + } + } + } + + unionedCondition := operatorv1.OperatorCondition{Type: conditionType, Status: operatorv1.ConditionUnknown} + if len(interestingConditions) == 0 { + unionedCondition.Status = operatorv1.ConditionUnknown + unionedCondition.Reason = "NoData" + return OperatorConditionToClusterOperatorCondition(unionedCondition) + } + + oneMinuteAgo := time.Now().Add(-1 * time.Minute) + earliestBadConditionNotOldEnough := earliestTransitionTime(badConditions).Time.After(oneMinuteAgo) + if len(badConditions) == 0 || (hasInertia && earliestBadConditionNotOldEnough) { + unionedCondition.Status = defaultConditionStatus + unionedCondition.Message = unionMessage(interestingConditions) + unionedCondition.Reason = "AsExpected" + unionedCondition.LastTransitionTime = latestTransitionTime(interestingConditions) + + return OperatorConditionToClusterOperatorCondition(unionedCondition) + } + + // at this point we have bad conditions + unionedCondition.Status = oppositeConditionStatus + unionedCondition.Message = unionMessage(badConditions) + unionedCondition.Reason = unionReason(badConditions) + unionedCondition.LastTransitionTime = latestTransitionTime(badConditions) + + return OperatorConditionToClusterOperatorCondition(unionedCondition) +} + +func latestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time { + latestTransitionTime := metav1.Time{} + for _, condition := range conditions { + if latestTransitionTime.Before(&condition.LastTransitionTime) { + latestTransitionTime = condition.LastTransitionTime + } + } + return latestTransitionTime +} + +func earliestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time { + earliestTransitionTime := metav1.Now() + for _, condition := range conditions { + if !earliestTransitionTime.Before(&condition.LastTransitionTime) { + earliestTransitionTime = condition.LastTransitionTime + } + } + return earliestTransitionTime +} + +func uniq(s []string) []string { + seen := make(map[string]struct{}, len(s)) + j := 0 + for _, v := range s { + if _, ok := seen[v]; ok { + continue + } + seen[v] = struct{}{} + s[j] = v + j++ + } + return s[:j] +} + +func unionMessage(conditions []operatorv1.OperatorCondition) string { + messages := []string{} + for _, condition := range conditions { + if len(condition.Message) == 0 { + continue + } + for _, message := range uniq(strings.Split(condition.Message, "\n")) { + messages = append(messages, fmt.Sprintf("%s: %s", condition.Type, message)) + } + } + return strings.Join(messages, "\n") +} + +func unionReason(conditions []operatorv1.OperatorCondition) string { + if len(conditions) == 1 
{ + if len(conditions[0].Reason) != 0 { + return conditions[0].Type + conditions[0].Reason + } + return conditions[0].Type + } else { + return "MultipleConditionsMatching" + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go new file mode 100644 index 0000000000..2f0f6307e2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -0,0 +1,250 @@ +package status + +import ( + "fmt" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + configv1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +type VersionGetter interface { + // SetVersion is a way to set the version for an operand. It must be thread-safe + SetVersion(operandName, version string) + // GetVersion is way to get the versions for all operands. 
It must be thread-safe and return an object that doesn't mutate + GetVersions() map[string]string + // VersionChangedChannel is a channel that will get an item whenever SetVersion has been called + VersionChangedChannel() <-chan struct{} +} + +type StatusSyncer struct { + clusterOperatorName string + relatedObjects []configv1.ObjectReference + + versionGetter VersionGetter + operatorClient operatorv1helpers.OperatorClient + clusterOperatorClient configv1client.ClusterOperatorsGetter + clusterOperatorLister configv1listers.ClusterOperatorLister + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +func NewClusterOperatorStatusController( + name string, + relatedObjects []configv1.ObjectReference, + clusterOperatorClient configv1client.ClusterOperatorsGetter, + clusterOperatorInformer configv1informers.ClusterOperatorInformer, + operatorClient operatorv1helpers.OperatorClient, + versionGetter VersionGetter, + recorder events.Recorder, +) *StatusSyncer { + c := &StatusSyncer{ + clusterOperatorName: name, + relatedObjects: relatedObjects, + versionGetter: versionGetter, + clusterOperatorClient: clusterOperatorClient, + clusterOperatorLister: clusterOperatorInformer.Lister(), + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("status-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StatusSyncer-"+name), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + clusterOperatorInformer.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, clusterOperatorInformer.Informer().HasSynced) + + return c +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. +func (c StatusSyncer) sync() error { + detailedSpec, currentDetailedStatus, _, err := c.operatorClient.GetOperatorState() + if apierrors.IsNotFound(err) { + c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for clusteroperator/%s", c.clusterOperatorName) + if err := c.clusterOperatorClient.ClusterOperators().Delete(c.clusterOperatorName, nil); err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil + } + if err != nil { + return err + } + + originalClusterOperatorObj, err := c.clusterOperatorLister.Get(c.clusterOperatorName) + if err != nil && !apierrors.IsNotFound(err) { + c.eventRecorder.Warningf("StatusFailed", "Unable to get current operator status for clusteroperator/%s: %v", c.clusterOperatorName, err) + return err + } + + // ensure that we have a clusteroperator resource + if originalClusterOperatorObj == nil || apierrors.IsNotFound(err) { + klog.Infof("clusteroperator/%s not found", c.clusterOperatorName) + var createErr error + originalClusterOperatorObj, createErr = c.clusterOperatorClient.ClusterOperators().Create(&configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{Name: c.clusterOperatorName}, + }) + if apierrors.IsNotFound(createErr) { + // this means that the API isn't present. We did not fail. 
Try again later + klog.Infof("ClusterOperator API not created") + c.queue.AddRateLimited(workQueueKey) + return nil + } + if createErr != nil { + c.eventRecorder.Warningf("StatusCreateFailed", "Failed to create operator status: %v", err) + return createErr + } + } + clusterOperatorObj := originalClusterOperatorObj.DeepCopy() + + if detailedSpec.ManagementState == operatorv1.Unmanaged && !management.IsOperatorAlwaysManaged() { + clusterOperatorObj.Status = configv1.ClusterOperatorStatus{} + + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + + if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { + return nil + } + if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(clusterOperatorObj); err != nil { + return updateErr + } + c.eventRecorder.Eventf("OperatorStatusChanged", "Status for operator %s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) + return nil + } + + clusterOperatorObj.Status.RelatedObjects = c.relatedObjects + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionInertialCondition("Degraded", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Progressing", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Available", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Upgradeable", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) + + // TODO work out removal. We don't always know the existing value, so removing early seems like a bad idea. Perhaps a remove flag. 
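+	// As written, operand versions are only added or updated on the clusteroperator; an entry is never removed once reported.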
+ versions := c.versionGetter.GetVersions() + for operand, version := range versions { + previousVersion := operatorv1helpers.SetOperandVersion(&clusterOperatorObj.Status.Versions, configv1.OperandVersion{Name: operand, Version: version}) + if previousVersion != version { + // having this message will give us a marker in events when the operator updated compared to when the operand is updated + c.eventRecorder.Eventf("OperatorVersionChanged", "clusteroperator/%s version %q changed from %q to %q", c.clusterOperatorName, operand, previousVersion, version) + } + } + + // if we have no diff, just return + if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { + return nil + } + klog.V(2).Infof("clusteroperator/%s diff %v", c.clusterOperatorName, resourceapply.JSONPatch(originalClusterOperatorObj, clusterOperatorObj)) + + if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(clusterOperatorObj); err != nil { + return updateErr + } + c.eventRecorder.Eventf("OperatorStatusChanged", "Status for clusteroperator/%s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) + return nil +} + +func OperatorConditionToClusterOperatorCondition(condition operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return configv1.ClusterOperatorStatusCondition{ + Type: configv1.ClusterStatusConditionType(condition.Type), + Status: configv1.ConditionStatus(condition.Status), + LastTransitionTime: condition.LastTransitionTime, + Reason: condition.Reason, + Message: condition.Message, + } +} + +func (c *StatusSyncer) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting StatusSyncer-" + c.clusterOperatorName) + defer klog.Infof("Shutting down StatusSyncer-" + c.clusterOperatorName) + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // start watching for version changes + go c.watchVersionGetter(stopCh) + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *StatusSyncer) watchVersionGetter(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + versionCh := c.versionGetter.VersionChangedChannel() + // always kick at least once + c.queue.Add(workQueueKey) + + for { + select { + case <-stopCh: + return + case <-versionCh: + c.queue.Add(workQueueKey) + } + } +} + +func (c *StatusSyncer) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *StatusSyncer) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *StatusSyncer) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go new file mode 100644 index 0000000000..5543a602d2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go @@ -0,0 +1,85 @@ +package status + +import ( + "os" + "sync" + + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/events" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type versionGetter struct { + lock sync.Mutex + versions map[string]string + notificationChannels []chan struct{} +} + +const ( + operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION" +) + +func NewVersionGetter() VersionGetter { + return &versionGetter{ + versions: map[string]string{}, + } +} + +func (v *versionGetter) SetVersion(operandName, version string) { + v.lock.Lock() + defer v.lock.Unlock() + + v.versions[operandName] = version + + for i := range v.notificationChannels { + ch := v.notificationChannels[i] + // don't let a slow consumer block the rest + go func() { + ch <- struct{}{} + }() + } +} + +func (v *versionGetter) GetVersions() map[string]string { + v.lock.Lock() + defer v.lock.Unlock() + + ret := map[string]string{} + for k, v := range v.versions { + ret[k] = v + } + return ret +} + +func (v *versionGetter) VersionChangedChannel() <-chan struct{} { + v.lock.Lock() + defer v.lock.Unlock() + + channel := make(chan struct{}, 50) + v.notificationChannels = append(v.notificationChannels, channel) + return channel +} + +func VersionForOperandFromEnv() string { + return os.Getenv(operandImageVersionEnvVarName) +} + +func VersionForOperand(namespace, imagePullSpec string, configMapGetter corev1client.ConfigMapsGetter, eventRecorder events.Recorder) string { + versionMap := map[string]string{} + versionMapping, err := configMapGetter.ConfigMaps(namespace).Get("version-mapping", metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + eventRecorder.Warningf("VersionMappingFailure", "unable to get version mapping: %v", err) + return "" + } + if versionMapping != nil { + for version, image := range versionMapping.Data { + versionMap[image] = version + } + } + + // we have the actual daemonset and we need the pull spec 
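+	// A pull spec that is not present in the mapping simply yields the empty string here.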
+	operandVersion := versionMap[imagePullSpec]
+	return operandVersion
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go
new file mode 100644
index 0000000000..2f659617b2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go
@@ -0,0 +1,194 @@
+package unsupportedconfigoverridescontroller
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"k8s.io/klog"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	kyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/management"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const (
+	unsupportedConfigOverridesControllerUpgradeable = "UnsupportedConfigOverridesUpgradeable"
+	controllerWorkQueueKey                          = "key"
+)
+
+// UnsupportedConfigOverridesController is a controller that reports the UnsupportedConfigOverridesUpgradeable condition:
+// True when spec.unsupportedConfigOverrides is empty, False (with the overridden keys listed in the message) when it is set.
+type UnsupportedConfigOverridesController struct {
+	operatorClient v1helpers.OperatorClient
+
+	cachesToSync  []cache.InformerSynced
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+}
+
+// NewUnsupportedConfigOverridesController creates UnsupportedConfigOverridesController. 
+func NewUnsupportedConfigOverridesController( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, +) *UnsupportedConfigOverridesController { + c := &UnsupportedConfigOverridesController{ + operatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UnsupportedConfigOverridesController"), + eventRecorder: eventRecorder.WithComponentSuffix("unsupported-config-overrides-controller"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +func (c *UnsupportedConfigOverridesController) sync() error { + operatorSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: unsupportedConfigOverridesControllerUpgradeable, + Status: operatorv1.ConditionTrue, + Reason: "NoUnsupportedConfigOverrides", + } + if len(operatorSpec.UnsupportedConfigOverrides.Raw) > 0 { + cond.Status = operatorv1.ConditionFalse + cond.Reason = "UnsupportedConfigOverridesSet" + cond.Message = fmt.Sprintf("unsupportedConfigOverrides=%v", string(operatorSpec.UnsupportedConfigOverrides.Raw)) + + // try to get a prettier message + keys, err := keysSetInUnsupportedConfig(operatorSpec.UnsupportedConfigOverrides.Raw) + if err == nil { + cond.Message = fmt.Sprintf("setting: %v", keys.List()) + + } + } + + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil +} + +func keysSetInUnsupportedConfig(configYaml []byte) (sets.String, error) { + configJson, err := kyaml.ToJSON(configYaml) + if err != nil { + klog.Warning(err) + // maybe it's just json + configJson = configYaml + } + + config := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(configJson)).Decode(&config); err != nil { + return nil, err + } + + return keysSetInUnsupportedConfigMap([]string{}, config), nil +} + +func keysSetInUnsupportedConfigMap(pathSoFar []string, config map[string]interface{}) sets.String { + ret := sets.String{} + + for k, v := range config { + currPath := append(pathSoFar, k) + + switch castV := v.(type) { + case map[string]interface{}: + ret.Insert(keysSetInUnsupportedConfigMap(currPath, castV).UnsortedList()...) + case []interface{}: + ret.Insert(keysSetInUnsupportedConfigSlice(currPath, castV).UnsortedList()...) + default: + ret.Insert(strings.Join(currPath, ".")) + } + } + + return ret +} + +func keysSetInUnsupportedConfigSlice(pathSoFar []string, config []interface{}) sets.String { + ret := sets.String{} + + for index, v := range config { + currPath := append(pathSoFar, fmt.Sprintf("%d", index)) + + switch castV := v.(type) { + case map[string]interface{}: + ret.Insert(keysSetInUnsupportedConfigMap(currPath, castV).UnsortedList()...) + case []interface{}: + ret.Insert(keysSetInUnsupportedConfigSlice(currPath, castV).UnsortedList()...) + default: + ret.Insert(strings.Join(currPath, ".")) + } + } + + return ret +} + +func (c *UnsupportedConfigOverridesController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting UnsupportedConfigOverridesController") + defer klog.Infof("Shutting down UnsupportedConfigOverridesController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) 
{ + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *UnsupportedConfigOverridesController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *UnsupportedConfigOverridesController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *UnsupportedConfigOverridesController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new file mode 100644 index 0000000000..0038bc5590 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,102 @@ +package v1helpers + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(opts metav1.ListOptions) (*corev1.ConfigMapList, error) { + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + lister corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return 
combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(name string, options metav1.GetOptions) (*corev1.Secret, error) { + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(opts metav1.ListOptions) (*corev1.SecretList, error) { + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 0000000000..8933328978 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 0000000000..b7696bc652 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,259 @@ +package v1helpers + +import ( + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string { + if versions == nil { + versions = &[]configv1.OperandVersion{} + } + existingVersion := FindOperandVersion(*versions, operandVersion.Name) + if existingVersion == nil { + *versions = append(*versions, operandVersion) + return "" + } + + previous := existingVersion.Version + existingVersion.Version = operandVersion.Version + return previous +} + +func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion { + if versions == nil { + return nil + } + for i := range versions { + if versions[i].Name == name { + return &versions[i] + } + } + return nil +} + +func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) { + if conditions == nil { + conditions = &[]operatorv1.OperatorCondition{} + } + existingCondition := FindOperatorCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) { + if conditions == nil { + conditions = &[]operatorv1.OperatorCondition{} + } + newConditions := []operatorv1.OperatorCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool { + return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue) +} + +func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool { + return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse) +} + +func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// UpdateOperatorSpecFunc is a func that mutates an operator spec. +type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error + +// UpdateSpec applies the update funcs to the oldStatus and tries to update via the client. 
+func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) { + updated := false + var operatorSpec *operatorv1.OperatorSpec + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + oldSpec, _, resourceVersion, err := client.GetOperatorState() + if err != nil { + return err + } + + newSpec := oldSpec.DeepCopy() + for _, update := range updateFuncs { + if err := update(newSpec); err != nil { + return err + } + } + + if equality.Semantic.DeepEqual(oldSpec, newSpec) { + return nil + } + + operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec) + updated = err == nil + return err + }) + + return operatorSpec, updated, err +} + +// UpdateSpecConfigFn returns a func to update the config. +func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc { + return func(oldSpec *operatorv1.OperatorSpec) error { + oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}} + return nil + } +} + +// UpdateStatusFunc is a func that mutates an operator status. +type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error + +// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client. +func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) { + updated := false + var updatedOperatorStatus *operatorv1.OperatorStatus + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, oldStatus, resourceVersion, err := client.GetOperatorState() + if err != nil { + return err + } + + newStatus := oldStatus.DeepCopy() + for _, update := range updateFuncs { + if err := update(newStatus); err != nil { + return err + } + } + + if equality.Semantic.DeepEqual(oldStatus, newStatus) { + return nil + } + + updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus) + updated = err == nil + return err + }) + + return updatedOperatorStatus, updated, err +} + +// UpdateConditionFunc returns a func to update a condition. +func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc { + return func(oldStatus *operatorv1.OperatorStatus) error { + SetOperatorCondition(&oldStatus.Conditions, cond) + return nil + } +} + +// UpdateStatusFunc is a func that mutates an operator status. +type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error + +// UpdateStaticPodStatus applies the update funcs to the oldStatus abd tries to update via the client. +func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) { + updated := false + var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus + err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState() + if err != nil { + return err + } + + newStatus := oldStatus.DeepCopy() + for _, update := range updateFuncs { + if err := update(newStatus); err != nil { + return err + } + } + + if equality.Semantic.DeepEqual(oldStatus, newStatus) { + // We return the newStatus which is a deep copy of oldStatus but with all update funcs applied. 
+ updatedOperatorStatus = newStatus + return nil + } + + updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus) + updated = err == nil + return err + }) + + return updatedOperatorStatus, updated, err +} + +// UpdateStaticPodConditionFn returns a func to update a condition. +func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc { + return func(oldStatus *operatorv1.StaticPodOperatorStatus) error { + SetOperatorCondition(&oldStatus.Conditions, cond) + return nil + } +} + +type aggregate []error + +var _ utilerrors.Aggregate = aggregate{} + +// NewMultiLineAggregate returns an aggregate error with multi-line output +func NewMultiLineAggregate(errList []error) error { + var errs []error + for _, e := range errList { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// Error is part of the error interface. +func (agg aggregate) Error() string { + msgs := make([]string, len(agg)) + for i := range agg { + msgs[i] = agg[i].Error() + } + return strings.Join(msgs, "\n") +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go new file mode 100644 index 0000000000..8a3636b334 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -0,0 +1,105 @@ +package v1helpers + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power +type KubeInformersForNamespaces interface { + Start(stopCh <-chan struct{}) + InformersFor(namespace string) informers.SharedInformerFactory + Namespaces() sets.String + + ConfigMapLister() corev1listers.ConfigMapLister + SecretLister() corev1listers.SecretLister +} + +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + ret := kubeInformersForNamespaces{} + for _, namespace := range namespaces { + if len(namespace) == 0 { + ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + continue + } + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + } + + return ret +} + +type kubeInformersForNamespaces map[string]informers.SharedInformerFactory + +func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { + for _, informer := range i { + informer.Start(stopCh) + } +} + +func (i kubeInformersForNamespaces) Namespaces() sets.String { + return sets.StringKeySet(i) +} +func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory { + return i[namespace] +} + +func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool { + return i.InformersFor(namespace) != nil +} + +type configMapLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister { + return configMapLister(i) +} + +func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) { + 
globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().ConfigMaps().Lister().List(selector) +} + +func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace) +} + +type secretLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister { + return secretLister(i) +} + +func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().Secrets().Lister().List(selector) +} + +func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().Secrets().Lister().Secrets(namespace) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go new file mode 100644 index 0000000000..4afb23a612 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -0,0 +1,30 @@ +package v1helpers + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + "k8s.io/client-go/tools/cache" +) + +type OperatorClient interface { + Informer() cache.SharedIndexInformer + // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister. + GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) + // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version. + UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) + // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. + UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) +} + +type StaticPodOperatorClient interface { + OperatorClient + // GetStaticPodOperatorState returns the static pod operator spec, status and the resource version, + // potentially from a lister. + GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // GetStaticPodOperatorStateWithQuorum return the static pod operator spec, status and resource version + // directly from a server read. + GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version. + UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) + // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. 
+ UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go new file mode 100644 index 0000000000..014585c551 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -0,0 +1,215 @@ +package v1helpers + +import ( + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. +func NewFakeSharedIndexInformer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +type fakeSharedIndexInformer struct{} + +func (fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) { +} + +func (fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { +} + +func (fakeSharedIndexInformer) GetStore() cache.Store { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetController() cache.Controller { + panic("implement me") +} + +func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) { + panic("implement me") +} + +func (fakeSharedIndexInformer) HasSynced() bool { + panic("implement me") +} + +func (fakeSharedIndexInformer) LastSyncResourceVersion() string { + panic("implement me") +} + +func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { + panic("implement me") +} + +// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeOperatorStatus *operatorv1.OperatorStatus + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, "", nil +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec 
*operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(string, *operatorv1.OperatorStatus) (status *operatorv1.OperatorStatus, err error) { + panic("not supported") +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +func (n *fakeNodeLister) ListWithPredicate(predicate corev1listers.NodeConditionPredicate) ([]*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. +func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClient { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} +func (c *fakeOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go new file mode 100644 index 0000000000..40d615583c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go @@ -0,0 +1,67 @@ +package versioning + +import ( + "github.com/blang/semver" +) + +type VersionRange interface { + Between(needle *semver.Version) bool + BetweenOrEmpty(needle *semver.Version) bool +} + +type versionRange 
struct { + lowerInclusive bool + lower semver.Version + + upperInclusive bool + upper semver.Version +} + +// NewRange is the "normal" [1.1.0, 1.2) +func NewRange(lowerInclusive, upperExclusive string) (VersionRange, error) { + lower, err := semver.Parse(lowerInclusive) + if err != nil { + return nil, err + } + upper, err := semver.Parse(upperExclusive) + if err != nil { + return nil, err + } + + return &versionRange{ + lowerInclusive: true, + lower: lower, + upper: upper, + }, nil +} + +func NewRangeOrDie(lowerInclusive, upperExclusive string) VersionRange { + ret, err := NewRange(lowerInclusive, upperExclusive) + if err != nil { + panic(err) + } + return ret +} + +func (r versionRange) Between(needle *semver.Version) bool { + switch { + case r.lowerInclusive && !r.upperInclusive: + return needle.GTE(r.lower) && needle.LT(r.upper) + case r.lowerInclusive && r.upperInclusive: + return needle.GTE(r.lower) && needle.LTE(r.upper) + case !r.lowerInclusive && !r.upperInclusive: + return needle.GT(r.lower) && needle.LT(r.upper) + case !r.lowerInclusive && r.upperInclusive: + return needle.GT(r.lower) && needle.LTE(r.upper) + + } + + panic("math broke") +} + +func (r versionRange) BetweenOrEmpty(needle *semver.Version) bool { + if needle == nil { + return true + } + return r.Between(needle) +} diff --git a/vendor/github.com/openshift/library-go/pkg/proc/reaper.go b/vendor/github.com/openshift/library-go/pkg/proc/reaper.go new file mode 100644 index 0000000000..21f5f71ff5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/proc/reaper.go @@ -0,0 +1,37 @@ +// +build linux + +package proc + +import ( + "os" + "os/signal" + "syscall" + + "k8s.io/klog" +) + +// StartReaper starts a goroutine to reap processes if called from a process +// that has pid 1. +func StartReaper() { + if os.Getpid() == 1 { + klog.V(4).Infof("Launching reaper") + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGCHLD) + for { + // Wait for a child to terminate + sig := <-sigs + klog.V(4).Infof("Signal received: %v", sig) + for { + // Reap processes + cpid, _ := syscall.Wait4(-1, nil, syscall.WNOHANG, nil) + if cpid < 1 { + break + } + + klog.V(4).Infof("Reaped process with pid %d", cpid) + } + } + }() + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go b/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go new file mode 100644 index 0000000000..75644fa5a8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package proc + +// StartReaper has no effect on non-linux platforms. +// Support for other unices will be added. +func StartReaper() { +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go new file mode 100644 index 0000000000..4f6802ae3a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go @@ -0,0 +1,36 @@ +package serviceability + +import ( + "os" + "strings" + + "github.com/sirupsen/logrus" +) + +// InitLogrus initializes logrus by setting a loglevel for it. 
+func InitLogrus(level string) { + if len(level) == 0 { + return + } + level = strings.ToUpper(level) + switch level { + case "DEBUG": + logrus.SetLevel(logrus.DebugLevel) + case "INFO": + logrus.SetLevel(logrus.InfoLevel) + case "WARN": + logrus.SetLevel(logrus.WarnLevel) + case "ERROR": + logrus.SetLevel(logrus.ErrorLevel) + case "FATAL": + logrus.SetLevel(logrus.FatalLevel) + case "PANIC": + logrus.SetLevel(logrus.PanicLevel) + default: + return + } + + logrus.SetFormatter(&logrus.TextFormatter{}) + logrus.SetOutput(os.Stdout) + +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go b/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go new file mode 100644 index 0000000000..506298af6d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go @@ -0,0 +1,93 @@ +package serviceability + +import ( + "encoding/json" + "strings" + "time" + + "k8s.io/klog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" +) + +// BehaviorOnPanic is a helper for setting the crash mode of OpenShift when a panic is caught. +// It returns a function that should be the defer handler for the caller. +func BehaviorOnPanic(modeString string, productVersion version.Info) func() { + modes := []string{} + if err := json.Unmarshal([]byte(modeString), &modes); err != nil { + return behaviorOnPanic(modeString, productVersion) + } + + fns := []func(){} + + for _, mode := range modes { + fns = append(fns, behaviorOnPanic(mode, productVersion)) + } + + return func() { + for _, fn := range fns { + fn() + } + } +} + +func behaviorOnPanic(mode string, productVersion version.Info) func() { + doNothing := func() {} + + switch { + case mode == "crash": + klog.Infof("Process will terminate as soon as a panic occurs.") + utilruntime.ReallyCrash = true + return doNothing + + case strings.HasPrefix(mode, "crash-after-delay:"): + delayDurationString := strings.TrimPrefix(mode, "crash-after-delay:") + delayDuration, err := time.ParseDuration(delayDurationString) + if err != nil { + klog.Errorf("Unable to start crash-after-delay. Crashing immediately instead: %v", err) + utilruntime.ReallyCrash = true + return doNothing + } + klog.Infof("Process will terminate %v after a panic occurs.", delayDurationString) + utilruntime.ReallyCrash = false + utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, crashOnDelay(delayDuration, delayDurationString)) + return doNothing + + case strings.HasPrefix(mode, "sentry:"): + url := strings.TrimPrefix(mode, "sentry:") + m, err := NewSentryMonitor(url, productVersion) + if err != nil { + klog.Errorf("Unable to start Sentry for panic tracing: %v", err) + return doNothing + } + klog.Infof("Process will log all panics and errors to Sentry.") + utilruntime.ReallyCrash = false + utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, m.CapturePanic) + utilruntime.ErrorHandlers = append(utilruntime.ErrorHandlers, m.CaptureError) + return func() { + if r := recover(); r != nil { + m.CapturePanicAndWait(r, 2*time.Second) + panic(r) + } + } + case len(mode) == 0: + // default panic behavior + utilruntime.ReallyCrash = false + return doNothing + + default: + klog.Errorf("Unrecognized panic behavior") + return doNothing + } +} + +func crashOnDelay(delay time.Duration, delayString string) func(interface{}) { + return func(in interface{}) { + go func() { + klog.Errorf("Panic happened. 
Process will crash in %v.", delayString) + time.Sleep(delay) + panic(in) + }() + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go b/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go new file mode 100644 index 0000000000..ded45eb29c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go @@ -0,0 +1,34 @@ +package serviceability + +import ( + "fmt" + "net/http" + "os" + "runtime" + + _ "net/http/pprof" // include the default Go profiler mux + + "k8s.io/klog" +) + +// StartProfiler starts the golang profiler on a port if `web` is specified. It uses the "standard" openshift env vars +func StartProfiler() { + if env("OPENSHIFT_PROFILE", "") == "web" { + go func() { + runtime.SetBlockProfileRate(1) + profilePort := env("OPENSHIFT_PROFILE_PORT", "6060") + profileHost := env("OPENSHIFT_PROFILE_HOST", "127.0.0.1") + klog.Infof(fmt.Sprintf("Starting profiling endpoint at http://%s:%s/debug/pprof/", profileHost, profilePort)) + klog.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%s", profileHost, profilePort), nil)) + }() + } +} + +// env returns an environment variable or a default value if not specified. +func env(key string, defaultValue string) string { + val := os.Getenv(key) + if len(val) == 0 { + return defaultValue + } + return val +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go b/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go new file mode 100644 index 0000000000..42968df3af --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go @@ -0,0 +1,62 @@ +package serviceability + +import ( + "errors" + "fmt" + "time" + + "github.com/getsentry/raven-go" + + "k8s.io/apimachinery/pkg/version" +) + +// SentryMonitor encapsulates a Sentry client and set of default tags +type SentryMonitor struct { + client *raven.Client + tags map[string]string +} + +// NewSentryMonitor creates a class that can capture panics and errors from OpenShift +// and Kubernetes that can roll up to a Sentry server. 
+func NewSentryMonitor(url string, version version.Info) (*SentryMonitor, error) { + client, err := raven.NewClient(url, nil) + if err != nil { + return nil, err + } + client.SetRelease(version.GitCommit) + return &SentryMonitor{ + client: client, + }, nil +} + +func (m *SentryMonitor) capturePanic(capture interface{}) chan error { + var packet *raven.Packet + switch rval := capture.(type) { + case error: + packet = raven.NewPacket(rval.Error(), raven.NewException(rval, raven.NewStacktrace(2, 3, nil))) + default: + rvalStr := fmt.Sprint(rval) + packet = raven.NewPacket(rvalStr, raven.NewException(errors.New(rvalStr), raven.NewStacktrace(2, 3, nil))) + } + _, ch := m.client.Capture(packet, m.tags) + return ch +} + +// CapturePanic is used by the Sentry client to capture panics +func (m *SentryMonitor) CapturePanic(capture interface{}) { + m.capturePanic(capture) +} + +// CapturePanicAndWait waits until either the Sentry client captures a panic or +// the provided time expires +func (m *SentryMonitor) CapturePanicAndWait(capture interface{}, until time.Duration) { + select { + case <-m.capturePanic(capture): + case <-time.After(until): + } +} + +// CaptureError is used by the Sentry client to capture errors +func (m *SentryMonitor) CaptureError(err error) { + m.client.CaptureError(err, m.tags) +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go b/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go new file mode 100644 index 0000000000..5070e74d7f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go @@ -0,0 +1,62 @@ +package serviceability + +import ( + "os" + "os/signal" + "strings" + "syscall" + + "github.com/pkg/profile" +) + +// Stop is a function to defer in your main call to provide profile info. +type Stop interface { + Stop() +} + +type stopper struct{} + +func (stopper) Stop() {} + +// Profile returns an interface to defer for a profile: `defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()` is common. +// Suffixing the mode with `-tmp` will have the profiler write the run to a temporary directory with a unique name, which +// is useful when running the same command multiple times. +func Profile(mode string) Stop { + path := "." + if strings.HasSuffix(mode, "-tmp") { + mode = strings.TrimSuffix(mode, "-tmp") + path = "" + } + var stop Stop + switch mode { + case "mem": + stop = profileOnExit(profile.Start(profile.MemProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "cpu": + stop = profileOnExit(profile.Start(profile.CPUProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "block": + stop = profileOnExit(profile.Start(profile.BlockProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "mutex": + stop = profileOnExit(profile.Start(profile.MutexProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "trace": + stop = profileOnExit(profile.Start(profile.TraceProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + default: + stop = stopper{} + } + return stop +} + +func profileOnExit(s Stop) Stop { + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c + // Programs with more sophisticated signal handling + // should ensure the Stop() function returned from + // Start() is called during shutdown. 
+ // See http://godoc.org/github.com/pkg/profile + s.Stop() + + os.Exit(1) + }() + return s +} diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE new file mode 100644 index 0000000000..49ea0f9288 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go new file mode 100644 index 0000000000..c94b3848a0 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: Figure out what gopkg.in should do. + +package modfile + +import "strings" + +// ParseGopkgIn splits gopkg.in import paths into their constituent parts +func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return + } + f := strings.Split(path, "/") + if len(f) >= 2 { + if elem, v, ok := dotV(f[1]); ok { + root = strings.Join(f[:2], "/") + repo = "github.com/go-" + elem + "/" + elem + major = v + subdir = strings.Join(f[2:], "/") + return root, repo, major, subdir, true + } + } + if len(f) >= 3 { + if elem, v, ok := dotV(f[2]); ok { + root = strings.Join(f[:3], "/") + repo = "github.com/" + f[1] + "/" + elem + major = v + subdir = strings.Join(f[3:], "/") + return root, repo, major, subdir, true + } + } + return +} + +func dotV(name string) (elem, v string, ok bool) { + i := len(name) - 1 + for i >= 0 && '0' <= name[i] && name[i] <= '9' { + i-- + } + if i <= 2 || i+1 >= len(name) || name[i-1] != '.' 
|| name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 { + return "", "", false + } + return name[:i-1], name[i:], true +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/print.go b/vendor/github.com/rogpeppe/go-internal/modfile/print.go new file mode 100644 index 0000000000..7b1dd8f953 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/print.go @@ -0,0 +1,164 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modfile implements parsing and formatting for +// go.mod files. +package modfile + +import ( + "bytes" + "fmt" + "strings" +) + +func Format(f *FileSyntax) []byte { + pr := &printer{} + pr.file(f) + return pr.Bytes() +} + +// A printer collects the state during printing of a file or expression. +type printer struct { + bytes.Buffer // output buffer + comment []Comment // pending end-of-line comments + margin int // left margin (indent), a number of tabs +} + +// printf prints to the buffer. +func (p *printer) printf(format string, args ...interface{}) { + fmt.Fprintf(p, format, args...) +} + +// indent returns the position on the current line, in bytes, 0-indexed. +func (p *printer) indent() int { + b := p.Bytes() + n := 0 + for n < len(b) && b[len(b)-1-n] != '\n' { + n++ + } + return n +} + +// newline ends the current line, flushing end-of-line comments. +func (p *printer) newline() { + if len(p.comment) > 0 { + p.printf(" ") + for i, com := range p.comment { + if i > 0 { + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } + } + p.printf("%s", strings.TrimSpace(com.Token)) + } + p.comment = p.comment[:0] + } + + p.trim() + p.printf("\n") + for i := 0; i < p.margin; i++ { + p.printf("\t") + } +} + +// trim removes trailing spaces and tabs from the current line. +func (p *printer) trim() { + // Remove trailing spaces and tabs from line we're about to end. + b := p.Bytes() + n := len(b) + for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { + n-- + } + p.Truncate(n) +} + +// file formats the given file into the print buffer. +func (p *printer) file(f *FileSyntax) { + for _, com := range f.Before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + for i, stmt := range f.Stmt { + switch x := stmt.(type) { + case *CommentBlock: + // comments already handled + p.expr(x) + + default: + p.expr(x) + p.newline() + } + + for _, com := range stmt.Comment().After { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + + if i+1 < len(f.Stmt) { + p.newline() + } + } +} + +func (p *printer) expr(x Expr) { + // Emit line-comments preceding this expression. + if before := x.Comment().Before; len(before) > 0 { + // Want to print a line comment. + // Line comments must be at the current margin. + p.trim() + if p.indent() > 0 { + // There's other text on the line. Start a new line. + p.printf("\n") + } + // Re-indent to margin. 
+ for i := 0; i < p.margin; i++ { + p.printf("\t") + } + for _, com := range before { + p.printf("%s", strings.TrimSpace(com.Token)) + p.newline() + } + } + + switch x := x.(type) { + default: + panic(fmt.Errorf("printer: unexpected type %T", x)) + + case *CommentBlock: + // done + + case *LParen: + p.printf("(") + case *RParen: + p.printf(")") + + case *Line: + sep := "" + for _, tok := range x.Token { + p.printf("%s%s", sep, tok) + sep = " " + } + + case *LineBlock: + for _, tok := range x.Token { + p.printf("%s ", tok) + } + p.expr(&x.LParen) + p.margin++ + for _, l := range x.Line { + p.newline() + p.expr(l) + } + p.margin-- + p.newline() + p.expr(&x.RParen) + } + + // Queue end-of-line comments for printing when we + // reach the end of the line. + p.comment = append(p.comment, x.Comment().Suffix...) +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/read.go b/vendor/github.com/rogpeppe/go-internal/modfile/read.go new file mode 100644 index 0000000000..1d81ff1ab7 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/read.go @@ -0,0 +1,869 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Module file parser. +// This is a simplified copy of Google's buildifier parser. + +package modfile + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A Position describes the position between two bytes of input. +type Position struct { + Line int // line in input (starting at 1) + LineRune int // rune in line (starting at 1) + Byte int // byte in input (starting at 0) +} + +// add returns the position at the end of s, assuming it starts at p. +func (p Position) add(s string) Position { + p.Byte += len(s) + if n := strings.Count(s, "\n"); n > 0 { + p.Line += n + s = s[strings.LastIndex(s, "\n")+1:] + p.LineRune = 1 + } + p.LineRune += utf8.RuneCountInString(s) + return p +} + +// An Expr represents an input element. +type Expr interface { + // Span returns the start and end position of the expression, + // excluding leading or trailing comments. + Span() (start, end Position) + + // Comment returns the comments attached to the expression. + // This method would normally be named 'Comments' but that + // would interfere with embedding a type of the same name. + Comment() *Comments +} + +// A Comment represents a single // comment. +type Comment struct { + Start Position + Token string // without trailing newline + Suffix bool // an end of line (not whole line) comment +} + +// Comments collects the comments associated with an expression. +type Comments struct { + Before []Comment // whole-line comments before this expression + Suffix []Comment // end-of-line comments after this expression + + // For top-level expressions only, After lists whole-line + // comments following the expression. + After []Comment +} + +// Comment returns the receiver. This isn't useful by itself, but +// a Comments struct is embedded into all the expression +// implementation types, and this gives each of those a Comment +// method to satisfy the Expr interface. +func (c *Comments) Comment() *Comments { + return c +} + +// A FileSyntax represents an entire go.mod file. 
+type FileSyntax struct { + Name string // file path + Comments + Stmt []Expr +} + +func (x *FileSyntax) Span() (start, end Position) { + if len(x.Stmt) == 0 { + return + } + start, _ = x.Stmt[0].Span() + _, end = x.Stmt[len(x.Stmt)-1].Span() + return start, end +} + +func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { + if hint == nil { + // If no hint given, add to the last statement of the given type. + Loop: + for i := len(x.Stmt) - 1; i >= 0; i-- { + stmt := x.Stmt[i] + switch stmt := stmt.(type) { + case *Line: + if stmt.Token != nil && stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + case *LineBlock: + if stmt.Token[0] == tokens[0] { + hint = stmt + break Loop + } + } + } + } + + if hint != nil { + for i, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt == hint { + // Convert line to line block. + stmt.InBlock = true + block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} + stmt.Token = stmt.Token[1:] + x.Stmt[i] = block + new := &Line{Token: tokens[1:], InBlock: true} + block.Line = append(block.Line, new) + return new + } + case *LineBlock: + if stmt == hint { + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line = append(stmt.Line, new) + return new + } + for j, line := range stmt.Line { + if line == hint { + // Add new line after hint. + stmt.Line = append(stmt.Line, nil) + copy(stmt.Line[j+2:], stmt.Line[j+1:]) + new := &Line{Token: tokens[1:], InBlock: true} + stmt.Line[j+1] = new + return new + } + } + } + } + } + + new := &Line{Token: tokens} + x.Stmt = append(x.Stmt, new) + return new +} + +func (x *FileSyntax) updateLine(line *Line, tokens ...string) { + if line.InBlock { + tokens = tokens[1:] + } + line.Token = tokens +} + +func (x *FileSyntax) removeLine(line *Line) { + line.Token = nil +} + +// Cleanup cleans up the file syntax x after any edit operations. +// To avoid quadratic behavior, removeLine marks the line as dead +// by setting line.Token = nil but does not remove it from the slice +// in which it appears. After edits have all been indicated, +// calling Cleanup cleans out the dead lines. +func (x *FileSyntax) Cleanup() { + w := 0 + for _, stmt := range x.Stmt { + switch stmt := stmt.(type) { + case *Line: + if stmt.Token == nil { + continue + } + case *LineBlock: + ww := 0 + for _, line := range stmt.Line { + if line.Token != nil { + stmt.Line[ww] = line + ww++ + } + } + if ww == 0 { + continue + } + if ww == 1 { + // Collapse block into single line. + line := &Line{ + Comments: Comments{ + Before: commentsAdd(stmt.Before, stmt.Line[0].Before), + Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), + After: commentsAdd(stmt.Line[0].After, stmt.After), + }, + Token: stringsAdd(stmt.Token, stmt.Line[0].Token), + } + x.Stmt[w] = line + w++ + continue + } + stmt.Line = stmt.Line[:ww] + } + x.Stmt[w] = stmt + w++ + } + x.Stmt = x.Stmt[:w] +} + +func commentsAdd(x, y []Comment) []Comment { + return append(x[:len(x):len(x)], y...) +} + +func stringsAdd(x, y []string) []string { + return append(x[:len(x):len(x)], y...) +} + +// A CommentBlock represents a top-level block of comments separate +// from any rule. +type CommentBlock struct { + Comments + Start Position +} + +func (x *CommentBlock) Span() (start, end Position) { + return x.Start, x.Start +} + +// A Line is a single line of tokens. 
+type Line struct { + Comments + Start Position + Token []string + InBlock bool + End Position +} + +func (x *Line) Span() (start, end Position) { + return x.Start, x.End +} + +// A LineBlock is a factored block of lines, like +// +// require ( +// "x" +// "y" +// ) +// +type LineBlock struct { + Comments + Start Position + LParen LParen + Token []string + Line []*Line + RParen RParen +} + +func (x *LineBlock) Span() (start, end Position) { + return x.Start, x.RParen.Pos.add(")") +} + +// An LParen represents the beginning of a parenthesized line block. +// It is a place to store suffix comments. +type LParen struct { + Comments + Pos Position +} + +func (x *LParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An RParen represents the end of a parenthesized line block. +// It is a place to store whole-line (before) comments. +type RParen struct { + Comments + Pos Position +} + +func (x *RParen) Span() (start, end Position) { + return x.Pos, x.Pos.add(")") +} + +// An input represents a single input file being parsed. +type input struct { + // Lexing state. + filename string // name of input file, for errors + complete []byte // entire input + remaining []byte // remaining input + token []byte // token being scanned + lastToken string // most recently returned token, for error messages + pos Position // current input position + comments []Comment // accumulated comments + endRule int // position of end of current rule + + // Parser state. + file *FileSyntax // returned top-level syntax tree + parseError error // error encountered during parsing + + // Comment assignment state. + pre []Expr // all expressions, in preorder traversal + post []Expr // all expressions, in postorder traversal +} + +func newInput(filename string, data []byte) *input { + return &input{ + filename: filename, + complete: data, + remaining: data, + pos: Position{Line: 1, LineRune: 1, Byte: 0}, + } +} + +// parse parses the input file. +func parse(file string, data []byte) (f *FileSyntax, err error) { + in := newInput(file, data) + // The parser panics for both routine errors like syntax errors + // and for programmer bugs like array index errors. + // Turn both into error returns. Catching bug panics is + // especially important when processing many files. + defer func() { + if e := recover(); e != nil { + if e == in.parseError { + err = in.parseError + } else { + err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e) + } + } + }() + + // Invoke the parser. + in.parseFile() + if in.parseError != nil { + return nil, in.parseError + } + in.file.Name = in.filename + + // Assign comments to nearby syntax. + in.assignComments() + + return in.file, nil +} + +// Error is called to report an error. +// The reason s is often "syntax error". +// Error does not return: it panics. +func (in *input) Error(s string) { + if s == "syntax error" && in.lastToken != "" { + s += " near " + in.lastToken + } + in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s) + panic(in.parseError) +} + +// eof reports whether the input has reached end of file. +func (in *input) eof() bool { + return len(in.remaining) == 0 +} + +// peekRune returns the next rune in the input without consuming it. +func (in *input) peekRune() int { + if len(in.remaining) == 0 { + return 0 + } + r, _ := utf8.DecodeRune(in.remaining) + return int(r) +} + +// peekPrefix reports whether the remaining input begins with the given prefix. 
+func (in *input) peekPrefix(prefix string) bool { + // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) + // but without the allocation of the []byte copy of prefix. + for i := 0; i < len(prefix); i++ { + if i >= len(in.remaining) || in.remaining[i] != prefix[i] { + return false + } + } + return true +} + +// readRune consumes and returns the next rune in the input. +func (in *input) readRune() int { + if len(in.remaining) == 0 { + in.Error("internal lexer error: readRune at EOF") + } + r, size := utf8.DecodeRune(in.remaining) + in.remaining = in.remaining[size:] + if r == '\n' { + in.pos.Line++ + in.pos.LineRune = 1 + } else { + in.pos.LineRune++ + } + in.pos.Byte += size + return int(r) +} + +type symType struct { + pos Position + endPos Position + text string +} + +// startToken marks the beginning of the next input token. +// It must be followed by a call to endToken, once the token has +// been consumed using readRune. +func (in *input) startToken(sym *symType) { + in.token = in.remaining + sym.text = "" + sym.pos = in.pos +} + +// endToken marks the end of an input token. +// It records the actual token string in sym.text if the caller +// has not done that already. +func (in *input) endToken(sym *symType) { + if sym.text == "" { + tok := string(in.token[:len(in.token)-len(in.remaining)]) + sym.text = tok + in.lastToken = sym.text + } + sym.endPos = in.pos +} + +// lex is called from the parser to obtain the next input token. +// It returns the token value (either a rune like '+' or a symbolic token _FOR) +// and sets val to the data associated with the token. +// For all our input tokens, the associated data is +// val.Pos (the position where the token begins) +// and val.Token (the input string corresponding to the token). +func (in *input) lex(sym *symType) int { + // Skip past spaces, stopping at non-space or EOF. + countNL := 0 // number of newlines we've skipped past + for !in.eof() { + // Skip over spaces. Count newlines so we can give the parser + // information about where top-level blank lines are, + // for top-level comment assignment. + c := in.peekRune() + if c == ' ' || c == '\t' || c == '\r' { + in.readRune() + continue + } + + // Comment runs to end of line. + if in.peekPrefix("//") { + in.startToken(sym) + + // Is this comment the only thing on its line? + // Find the last \n before this // and see if it's all + // spaces from there to here. + i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) + suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 + in.readRune() + in.readRune() + + // Consume comment. + for len(in.remaining) > 0 && in.readRune() != '\n' { + } + in.endToken(sym) + + sym.text = strings.TrimRight(sym.text, "\n") + in.lastToken = "comment" + + // If we are at top level (not in a statement), hand the comment to + // the parser as a _COMMENT token. The grammar is written + // to handle top-level comments itself. + if !suffix { + // Not in a statement. Tell parser about top-level comment. + return _COMMENT + } + + // Otherwise, save comment for later attachment to syntax tree. + if countNL > 1 { + in.comments = append(in.comments, Comment{sym.pos, "", false}) + } + in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix}) + countNL = 1 + return _EOL + } + + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + + // Found non-space non-comment. + break + } + + // Found the beginning of the next token. 
+ in.startToken(sym) + defer in.endToken(sym) + + // End of file. + if in.eof() { + in.lastToken = "EOF" + return _EOF + } + + // Punctuation tokens. + switch c := in.peekRune(); c { + case '\n': + in.readRune() + return c + + case '(': + in.readRune() + return c + + case ')': + in.readRune() + return c + + case '"', '`': // quoted string + quote := c + in.readRune() + for { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + if in.peekRune() == '\n' { + in.Error("unexpected newline in string") + } + c := in.readRune() + if c == quote { + break + } + if c == '\\' && quote != '`' { + if in.eof() { + in.pos = sym.pos + in.Error("unexpected EOF in string") + } + in.readRune() + } + } + in.endToken(sym) + return _STRING + } + + // Checked all punctuation. Must be identifier token. + if c := in.peekRune(); !isIdent(c) { + in.Error(fmt.Sprintf("unexpected input character %#q", c)) + } + + // Scan over identifier. + for isIdent(in.peekRune()) { + if in.peekPrefix("//") { + break + } + if in.peekPrefix("/*") { + in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) + } + in.readRune() + } + return _IDENT +} + +// isIdent reports whether c is an identifier rune. +// We treat nearly all runes as identifier runes. +func isIdent(c int) bool { + return c != 0 && !unicode.IsSpace(rune(c)) +} + +// Comment assignment. +// We build two lists of all subexpressions, preorder and postorder. +// The preorder list is ordered by start location, with outer expressions first. +// The postorder list is ordered by end location, with outer expressions last. +// We use the preorder list to assign each whole-line comment to the syntax +// immediately following it, and we use the postorder list to assign each +// end-of-line comment to the syntax immediately preceding it. + +// order walks the expression adding it and its subexpressions to the +// preorder and postorder lists. +func (in *input) order(x Expr) { + if x != nil { + in.pre = append(in.pre, x) + } + switch x := x.(type) { + default: + panic(fmt.Errorf("order: unexpected type %T", x)) + case nil: + // nothing + case *LParen, *RParen: + // nothing + case *CommentBlock: + // nothing + case *Line: + // nothing + case *FileSyntax: + for _, stmt := range x.Stmt { + in.order(stmt) + } + case *LineBlock: + in.order(&x.LParen) + for _, l := range x.Line { + in.order(l) + } + in.order(&x.RParen) + } + if x != nil { + in.post = append(in.post, x) + } +} + +// assignComments attaches comments to nearby syntax. +func (in *input) assignComments() { + const debug = false + + // Generate preorder and postorder lists. + in.order(in.file) + + // Split into whole-line comments and suffix comments. + var line, suffix []Comment + for _, com := range in.comments { + if com.Suffix { + suffix = append(suffix, com) + } else { + line = append(line, com) + } + } + + if debug { + for _, c := range line { + fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign line comments to syntax immediately following. + for _, x := range in.pre { + start, _ := x.Span() + if debug { + fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) + } + xcom := x.Comment() + for len(line) > 0 && start.Byte >= line[0].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) + } + xcom.Before = append(xcom.Before, line[0]) + line = line[1:] + } + } + + // Remaining line comments go at end of file. 
+ in.file.After = append(in.file.After, line...) + + if debug { + for _, c := range suffix { + fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) + } + } + + // Assign suffix comments to syntax immediately before. + for i := len(in.post) - 1; i >= 0; i-- { + x := in.post[i] + + start, end := x.Span() + if debug { + fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) + } + + // Do not assign suffix comments to end of line block or whole file. + // Instead assign them to the last element inside. + switch x.(type) { + case *FileSyntax: + continue + } + + // Do not assign suffix comments to something that starts + // on an earlier line, so that in + // + // x ( y + // z ) // comment + // + // we assign the comment to z and not to x ( ... ). + if start.Line != end.Line { + continue + } + xcom := x.Comment() + for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { + if debug { + fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) + } + xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) + suffix = suffix[:len(suffix)-1] + } + } + + // We assigned suffix comments in reverse. + // If multiple suffix comments were appended to the same + // expression node, they are now in reverse. Fix that. + for _, x := range in.post { + reverseComments(x.Comment().Suffix) + } + + // Remaining suffix comments go at beginning of file. + in.file.Before = append(in.file.Before, suffix...) +} + +// reverseComments reverses the []Comment list. +func reverseComments(list []Comment) { + for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { + list[i], list[j] = list[j], list[i] + } +} + +func (in *input) parseFile() { + in.file = new(FileSyntax) + var sym symType + var cb *CommentBlock + for { + tok := in.lex(&sym) + switch tok { + case '\n': + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + cb = nil + } + case _COMMENT: + if cb == nil { + cb = &CommentBlock{Start: sym.pos} + } + com := cb.Comment() + com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + if cb != nil { + in.file.Stmt = append(in.file.Stmt, cb) + } + return + default: + in.parseStmt(&sym) + if cb != nil { + in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before + cb = nil + } + } + } +} + +func (in *input) parseStmt(sym *symType) { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + in.file.Stmt = append(in.file.Stmt, &Line{ + Start: start, + Token: token, + End: end, + }) + return + case '(': + in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym)) + return + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock { + x := &LineBlock{ + Start: start, + Token: token, + LParen: LParen{Pos: sym.pos}, + } + var comments []Comment + for { + tok := in.lex(sym) + switch tok { + case _EOL: + // ignore + case '\n': + if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { + comments = append(comments, Comment{}) + } + case _COMMENT: + comments = append(comments, Comment{Start: sym.pos, Token: sym.text}) + case _EOF: + in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) + case ')': 
+ x.RParen.Before = comments + x.RParen.Pos = sym.pos + tok = in.lex(sym) + if tok != '\n' && tok != _EOF && tok != _EOL { + in.Error("syntax error (expected newline after closing paren)") + } + return x + default: + l := in.parseLine(sym) + x.Line = append(x.Line, l) + l.Comment().Before = comments + comments = nil + } + } +} + +func (in *input) parseLine(sym *symType) *Line { + start := sym.pos + end := sym.endPos + token := []string{sym.text} + for { + tok := in.lex(sym) + switch tok { + case '\n', _EOF, _EOL: + return &Line{ + Start: start, + Token: token, + End: end, + InBlock: true, + } + default: + token = append(token, sym.text) + end = sym.endPos + } + } +} + +const ( + _EOF = -(1 + iota) + _EOL + _IDENT + _STRING + _COMMENT +) + +var ( + slashSlash = []byte("//") + moduleStr = []byte("module") +) + +// ModulePath returns the module path from the gomod file text. +// If it cannot find a module path, it returns an empty string. +// It is tolerant of unrelated problems in the go.mod file. +func ModulePath(mod []byte) string { + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + return "" // missing module path +} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/rule.go b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go new file mode 100644 index 0000000000..24d275f12f --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go @@ -0,0 +1,724 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfile + +import ( + "bytes" + "errors" + "fmt" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + + "github.com/rogpeppe/go-internal/module" + "github.com/rogpeppe/go-internal/semver" +) + +// A File is the parsed, interpreted form of a go.mod file. +type File struct { + Module *Module + Go *Go + Require []*Require + Exclude []*Exclude + Replace []*Replace + + Syntax *FileSyntax +} + +// A Module is the module statement. +type Module struct { + Mod module.Version + Syntax *Line +} + +// A Go is the go statement. +type Go struct { + Version string // "1.23" + Syntax *Line +} + +// A Require is a single require statement. +type Require struct { + Mod module.Version + Indirect bool // has "// indirect" comment + Syntax *Line +} + +// An Exclude is a single exclude statement. +type Exclude struct { + Mod module.Version + Syntax *Line +} + +// A Replace is a single replace statement. 
+type Replace struct { + Old module.Version + New module.Version + Syntax *Line +} + +func (f *File) AddModuleStmt(path string) error { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + if f.Module == nil { + f.Module = &Module{ + Mod: module.Version{Path: path}, + Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), + } + } else { + f.Module.Mod.Path = path + f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) + } + return nil +} + +func (f *File) AddComment(text string) { + if f.Syntax == nil { + f.Syntax = new(FileSyntax) + } + f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ + Comments: Comments{ + Before: []Comment{ + { + Token: text, + }, + }, + }, + }) +} + +type VersionFixer func(path, version string) (string, error) + +// Parse parses the data, reported in errors as being from file, +// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found. +func Parse(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, true) +} + +// ParseLax is like Parse but ignores unknown statements. +// It is used when parsing go.mod files other than the main module, +// under the theory that most statement types we add in the future will +// only apply in the main module, like exclude and replace, +// and so we get better gradual deployments if old go commands +// simply ignore those statements when found in go.mod files +// in dependencies. +func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { + return parseToFile(file, data, fix, false) +} + +func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) { + fs, err := parse(file, data) + if err != nil { + return nil, err + } + f := &File{ + Syntax: fs, + } + + var errs bytes.Buffer + for _, x := range fs.Stmt { + switch x := x.(type) { + case *Line: + f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict) + + case *LineBlock: + if len(x.Token) > 1 { + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + } + switch x.Token[0] { + default: + if strict { + fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) + } + continue + case "module", "require", "exclude", "replace": + for _, l := range x.Line { + f.add(&errs, l, x.Token[0], l.Token, fix, strict) + } + } + } + } + + if errs.Len() > 0 { + return nil, errors.New(strings.TrimRight(errs.String(), "\n")) + } + return f, nil +} + +var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) + +func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { + // If strict is false, this module is a dependency. + // We ignore all unknown directives as well as main-module-only + // directives like replace and exclude. It will work better for + // forward compatibility if we can depend on modules that have unknown + // statements (presumed relevant only when acting as the main module) + // and simply ignore those statements. 
+ if !strict { + switch verb { + case "module", "require", "go": + // want these even for dependency go.mods + default: + return + } + } + + switch verb { + default: + fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb) + + case "go": + if f.Go != nil { + fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) + return + } + if len(args) != 1 || !goVersionRE.MatchString(args[0]) { + fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) + return + } + f.Go = &Go{Syntax: line} + f.Go.Version = args[0] + case "module": + if f.Module != nil { + fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line) + return + } + f.Module = &Module{Syntax: line} + if len(args) != 1 { + + fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + f.Module.Mod = module.Version{Path: s} + case "require", "exclude": + if len(args) != 2 { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + old := args[1] + v, err := parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + if verb == "require" { + f.Require = append(f.Require, &Require{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + Indirect: isIndirect(line), + }) + } else { + f.Exclude = append(f.Exclude, &Exclude{ + Mod: module.Version{Path: s, Version: v}, + Syntax: line, + }) + } + case "replace": + arrow := 2 + if len(args) >= 2 && args[1] == "=>" { + arrow = 1 + } + if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { + fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb) + return + } + s, err := parseString(&args[0]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + pathMajor, err := modulePathMajor(s) + if err != nil { + fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + var v string + if arrow == 2 { + old := args[1] + v, err = parseVersion(s, &args[1], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + if !module.MatchPathMajor(v, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) + return + } + } + ns, err := parseString(&args[arrow+1]) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid quoted 
string: %v\n", f.Syntax.Name, line.Start.Line, err) + return + } + nv := "" + if len(args) == arrow+2 { + if !IsDirectoryPath(ns) { + fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line) + return + } + if filepath.Separator == '/' && strings.Contains(ns, `\`) { + fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line) + return + } + } + if len(args) == arrow+3 { + old := args[arrow+1] + nv, err = parseVersion(ns, &args[arrow+2], fix) + if err != nil { + fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) + return + } + if IsDirectoryPath(ns) { + fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns) + return + } + } + f.Replace = append(f.Replace, &Replace{ + Old: module.Version{Path: s, Version: v}, + New: module.Version{Path: ns, Version: nv}, + Syntax: line, + }) + } +} + +// isIndirect reports whether line has a "// indirect" comment, +// meaning it is in go.mod only for its effect on indirect dependencies, +// so that it can be dropped entirely once the effective version of the +// indirect dependency reaches the given minimum version. +func isIndirect(line *Line) bool { + if len(line.Suffix) == 0 { + return false + } + f := strings.Fields(line.Suffix[0].Token) + return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//" +} + +// setIndirect sets line to have (or not have) a "// indirect" comment. +func setIndirect(line *Line, indirect bool) { + if isIndirect(line) == indirect { + return + } + if indirect { + // Adding comment. + if len(line.Suffix) == 0 { + // New comment. + line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} + return + } + // Insert at beginning of existing comment. + com := &line.Suffix[0] + space := " " + if len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t' { + space = "" + } + com.Token = "// indirect;" + space + com.Token[2:] + return + } + + // Removing comment. + f := strings.Fields(line.Suffix[0].Token) + if len(f) == 2 { + // Remove whole comment. + line.Suffix = nil + return + } + + // Remove comment prefix. + com := &line.Suffix[0] + i := strings.Index(com.Token, "indirect;") + com.Token = "//" + com.Token[i+len("indirect;"):] +} + +// IsDirectoryPath reports whether the given path should be interpreted +// as a directory path. Just like on the go command line, relative paths +// and rooted paths are directory paths; the rest are module paths. +func IsDirectoryPath(ns string) bool { + // Because go.mod files can move from one system to another, + // we check all known path syntaxes, both Unix and Windows. + return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || + strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' +} + +// MustQuote reports whether s must be quoted in order to appear as +// a single token in a go.mod line. 
+func MustQuote(s string) bool { + for _, r := range s { + if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' { + return true + } + } + return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") +} + +// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, +// the quotation of s. +func AutoQuote(s string) string { + if MustQuote(s) { + return strconv.Quote(s) + } + return s +} + +func parseString(s *string) (string, error) { + t := *s + if strings.HasPrefix(t, `"`) { + var err error + if t, err = strconv.Unquote(t); err != nil { + return "", err + } + } else if strings.ContainsAny(t, "\"'`") { + // Other quotes are reserved both for possible future expansion + // and to avoid confusion. For example if someone types 'x' + // we want that to be a syntax error and not a literal x in literal quotation marks. + return "", fmt.Errorf("unquoted string cannot contain quote") + } + *s = AutoQuote(t) + return t, nil +} + +func parseVersion(path string, s *string, fix VersionFixer) (string, error) { + t, err := parseString(s) + if err != nil { + return "", err + } + if fix != nil { + var err error + t, err = fix(path, t) + if err != nil { + return "", err + } + } + if v := module.CanonicalVersion(t); v != "" { + *s = v + return *s, nil + } + return "", fmt.Errorf("version must be of the form v1.2.3") +} + +func modulePathMajor(path string) (string, error) { + _, major, ok := module.SplitPathVersion(path) + if !ok { + return "", fmt.Errorf("invalid module path") + } + return major, nil +} + +func (f *File) Format() ([]byte, error) { + return Format(f.Syntax), nil +} + +// Cleanup cleans up the file f after any edit operations. +// To avoid quadratic behavior, modifications like DropRequire +// clear the entry but do not remove it from the slice. +// Cleanup cleans out all the cleared entries. 
+func (f *File) Cleanup() { + w := 0 + for _, r := range f.Require { + if r.Mod.Path != "" { + f.Require[w] = r + w++ + } + } + f.Require = f.Require[:w] + + w = 0 + for _, x := range f.Exclude { + if x.Mod.Path != "" { + f.Exclude[w] = x + w++ + } + } + f.Exclude = f.Exclude[:w] + + w = 0 + for _, r := range f.Replace { + if r.Old.Path != "" { + f.Replace[w] = r + w++ + } + } + f.Replace = f.Replace[:w] + + f.Syntax.Cleanup() +} + +func (f *File) AddRequire(path, vers string) error { + need := true + for _, r := range f.Require { + if r.Mod.Path == path { + if need { + r.Mod.Version = vers + f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) + need = false + } else { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + } + + if need { + f.AddNewRequire(path, vers, false) + } + return nil +} + +func (f *File) AddNewRequire(path, vers string, indirect bool) { + line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) + setIndirect(line, indirect) + f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line}) +} + +func (f *File) SetRequire(req []*Require) { + need := make(map[string]string) + indirect := make(map[string]bool) + for _, r := range req { + need[r.Mod.Path] = r.Mod.Version + indirect[r.Mod.Path] = r.Indirect + } + + for _, r := range f.Require { + if v, ok := need[r.Mod.Path]; ok { + r.Mod.Version = v + r.Indirect = indirect[r.Mod.Path] + } + } + + var newStmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *LineBlock: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + var newLines []*Line + for _, line := range stmt.Line { + if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" { + line.Token[1] = need[p] + delete(need, p) + setIndirect(line, indirect[p]) + newLines = append(newLines, line) + } + } + if len(newLines) == 0 { + continue // drop stmt + } + stmt.Line = newLines + } + + case *Line: + if len(stmt.Token) > 0 && stmt.Token[0] == "require" { + if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" { + stmt.Token[2] = need[p] + delete(need, p) + setIndirect(stmt, indirect[p]) + } else { + continue // drop stmt + } + } + } + newStmts = append(newStmts, stmt) + } + f.Syntax.Stmt = newStmts + + for path, vers := range need { + f.AddNewRequire(path, vers, indirect[path]) + } + f.SortBlocks() +} + +func (f *File) DropRequire(path string) error { + for _, r := range f.Require { + if r.Mod.Path == path { + f.Syntax.removeLine(r.Syntax) + *r = Require{} + } + } + return nil +} + +func (f *File) AddExclude(path, vers string) error { + var hint *Line + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + return nil + } + if x.Mod.Path == path { + hint = x.Syntax + } + } + + f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) + return nil +} + +func (f *File) DropExclude(path, vers string) error { + for _, x := range f.Exclude { + if x.Mod.Path == path && x.Mod.Version == vers { + f.Syntax.removeLine(x.Syntax) + *x = Exclude{} + } + } + return nil +} + +func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { + need := true + old := module.Version{Path: oldPath, Version: oldVers} + new := module.Version{Path: newPath, Version: newVers} + tokens := []string{"replace", AutoQuote(oldPath)} + if oldVers != "" { + tokens = append(tokens, oldVers) + } + tokens = append(tokens, "=>", 
AutoQuote(newPath)) + if newVers != "" { + tokens = append(tokens, newVers) + } + + var hint *Line + for _, r := range f.Replace { + if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { + if need { + // Found replacement for old; update to use new. + r.New = new + f.Syntax.updateLine(r.Syntax, tokens...) + need = false + continue + } + // Already added; delete other replacements for same. + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + if r.Old.Path == oldPath { + hint = r.Syntax + } + } + if need { + f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)}) + } + return nil +} + +func (f *File) DropReplace(oldPath, oldVers string) error { + for _, r := range f.Replace { + if r.Old.Path == oldPath && r.Old.Version == oldVers { + f.Syntax.removeLine(r.Syntax) + *r = Replace{} + } + } + return nil +} + +func (f *File) SortBlocks() { + f.removeDups() // otherwise sorting is unsafe + + for _, stmt := range f.Syntax.Stmt { + block, ok := stmt.(*LineBlock) + if !ok { + continue + } + sort.Slice(block.Line, func(i, j int) bool { + li := block.Line[i] + lj := block.Line[j] + for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { + if li.Token[k] != lj.Token[k] { + return li.Token[k] < lj.Token[k] + } + } + return len(li.Token) < len(lj.Token) + }) + } +} + +func (f *File) removeDups() { + have := make(map[module.Version]bool) + kill := make(map[*Line]bool) + for _, x := range f.Exclude { + if have[x.Mod] { + kill[x.Syntax] = true + continue + } + have[x.Mod] = true + } + var excl []*Exclude + for _, x := range f.Exclude { + if !kill[x.Syntax] { + excl = append(excl, x) + } + } + f.Exclude = excl + + have = make(map[module.Version]bool) + // Later replacements take priority over earlier ones. + for i := len(f.Replace) - 1; i >= 0; i-- { + x := f.Replace[i] + if have[x.Old] { + kill[x.Syntax] = true + continue + } + have[x.Old] = true + } + var repl []*Replace + for _, x := range f.Replace { + if !kill[x.Syntax] { + repl = append(repl, x) + } + } + f.Replace = repl + + var stmts []Expr + for _, stmt := range f.Syntax.Stmt { + switch stmt := stmt.(type) { + case *Line: + if kill[stmt] { + continue + } + case *LineBlock: + var lines []*Line + for _, line := range stmt.Line { + if !kill[line] { + lines = append(lines, line) + } + } + stmt.Line = lines + if len(lines) == 0 { + continue + } + } + stmts = append(stmts, stmt) + } + f.Syntax.Stmt = stmts +} diff --git a/vendor/github.com/rogpeppe/go-internal/module/module.go b/vendor/github.com/rogpeppe/go-internal/module/module.go new file mode 100644 index 0000000000..3ff6d9bf53 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/module/module.go @@ -0,0 +1,540 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package module defines the module.Version type +// along with support code. +package module + +// IMPORTANT NOTE +// +// This file essentially defines the set of valid import paths for the go command. +// There are many subtle considerations, including Unicode ambiguity, +// security, network, and file system representations. +// +// This file also defines the set of valid module path and version combinations, +// another topic with many subtle considerations. +// +// Changes to the semantics in this file require approval from rsc. 
+ +import ( + "fmt" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/rogpeppe/go-internal/semver" +) + +// A Version is defined by a module path and version pair. +type Version struct { + Path string + + // Version is usually a semantic version in canonical form. + // There are two exceptions to this general rule. + // First, the top-level target of a build has no specific version + // and uses Version = "". + // Second, during MVS calculations the version "none" is used + // to represent the decision to take no version of a given module. + Version string `json:",omitempty"` +} + +// Check checks that a given module path, version pair is valid. +// In addition to the path being a valid module path +// and the version being a valid semantic version, +// the two must correspond. +// For example, the path "yaml/v2" only corresponds to +// semantic versions beginning with "v2.". +func Check(path, version string) error { + if err := CheckPath(path); err != nil { + return err + } + if !semver.IsValid(version) { + return fmt.Errorf("malformed semantic version %v", version) + } + _, pathMajor, _ := SplitPathVersion(path) + if !MatchPathMajor(version, pathMajor) { + if pathMajor == "" { + pathMajor = "v0 or v1" + } + if pathMajor[0] == '.' { // .v1 + pathMajor = pathMajor[1:] + } + return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + } + return nil +} + +// firstPathOK reports whether r can appear in the first element of a module path. +// The first element of the path must be an LDH domain name, at least for now. +// To avoid case ambiguity, the domain name must be entirely lower case. +func firstPathOK(r rune) bool { + return r == '-' || r == '.' || + '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' +} + +// pathOK reports whether r can appear in an import path element. +// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// This matches what "go get" has historically recognized in import paths. +// TODO(rsc): We would like to allow Unicode letters, but that requires additional +// care in the safe encoding (see note below). +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return false +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. 
+ // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// CheckPath checks that a module path is valid. +func CheckPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed module path %q: %v", path, err) + } + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if i == 0 { + return fmt.Errorf("malformed module path %q: leading slash", path) + } + if !strings.Contains(path[:i], ".") { + return fmt.Errorf("malformed module path %q: missing dot in first path element", path) + } + if path[0] == '-' { + return fmt.Errorf("malformed module path %q: leading dash in first path element", path) + } + for _, r := range path[:i] { + if !firstPathOK(r) { + return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) + } + } + if _, _, ok := SplitPathVersion(path); !ok { + return fmt.Errorf("malformed module path %q: invalid version", path) + } + return nil +} + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' { + return fmt.Errorf("trailing dot in path element") + } + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + return nil +} + +// CheckFilePath checks whether a slash-separated file path is valid. 
+func CheckFilePath(path string) error { + if err := checkPath(path, true); err != nil { + return fmt.Errorf("malformed file path %q: %v", path, err) + } + return nil +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} + +// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path +// and version is either empty or "/vN" for N >= 2. +// As a special case, gopkg.in paths are recognized directly; +// they require ".vN" instead of "/vN", and for all N, not just N >= 2. +func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { + if strings.HasPrefix(path, "gopkg.in/") { + return splitGopkgIn(path) + } + + i := len(path) + dot := false + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + if path[i-1] == '.' { + dot = true + } + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { + return path, "", true + } + prefix, pathMajor = path[:i-2], path[i-2:] + if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { + return path, "", false + } + return prefix, pathMajor, true +} + +// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. +func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { + if !strings.HasPrefix(path, "gopkg.in/") { + return path, "", false + } + i := len(path) + if strings.HasSuffix(path, "-unstable") { + i -= len("-unstable") + } + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { + i-- + } + if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { + // All gopkg.in paths must end in vN for some N. + return path, "", false + } + prefix, pathMajor = path[:i-2], path[i-2:] + if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { + return path, "", false + } + return prefix, pathMajor, true +} + +// MatchPathMajor reports whether the semantic version v +// matches the path major version pathMajor. +func MatchPathMajor(v, pathMajor string) bool { + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { + // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. + // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. + return true + } + m := semver.Major(v) + if pathMajor == "" { + return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + } + return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// CanonicalVersion returns the canonical form of the version string v. +// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +func CanonicalVersion(v string) string { + cv := semver.Canonical(v) + if semver.Build(v) == "+incompatible" { + cv += "+incompatible" + } + return cv +} + +// Sort sorts the list by Path, breaking ties by comparing Versions. +func Sort(list []Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. 
+ // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return semver.Compare(vi, vj) < 0 + } + return fi < fj + }) +} + +// Safe encodings +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the safe encoding be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe encoding that +// leaves most paths unaltered. +// +// The safe encoding is this: +// replace every uppercase letter with an exclamation mark +// followed by the letter's lowercase equivalent. +// +// For example, +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the safe encoding is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to encode a literal !. +// +// Although paths are disallowed from using Unicode (see pathOK above), +// the eventual plan is to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention. Note however that not all runes that +// are different but case-fold equivalent are an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are considered to case-fold to each other. When we do add Unicode +// letters, we must not assume that upper/lower are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would encode as "!!k", or perhaps as "(212A)". +// +// Also, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. 
If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. + +// EncodePath returns the safe encoding of the given module path. +// It fails if the module path is invalid. +func EncodePath(path string) (encoding string, err error) { + if err := CheckPath(path); err != nil { + return "", err + } + + return encodeString(path) +} + +// EncodeVersion returns the safe encoding of the given module version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func EncodeVersion(v string) (encoding string, err error) { + if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { + return "", fmt.Errorf("disallowed version string %q", v) + } + return encodeString(v) +} + +func encodeString(s string) (encoding string, err error) { + haveUpper := false + for _, r := range s { + if r == '!' || r >= utf8.RuneSelf { + // This should be disallowed by CheckPath, but diagnose anyway. + // The correctness of the encoding loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EncodePath") + } + if 'A' <= r && r <= 'Z' { + haveUpper = true + } + } + + if !haveUpper { + return s, nil + } + + var buf []byte + for _, r := range s { + if 'A' <= r && r <= 'Z' { + buf = append(buf, '!', byte(r+'a'-'A')) + } else { + buf = append(buf, byte(r)) + } + } + return string(buf), nil +} + +// DecodePath returns the module path of the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid path. +func DecodePath(encoding string) (path string, err error) { + path, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid module path encoding %q", encoding) + } + if err := CheckPath(path); err != nil { + return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + } + return path, nil +} + +// DecodeVersion returns the version string for the given safe encoding. +// It fails if the encoding is invalid or encodes an invalid version. +// Versions are allowed to be in non-semver form but must be valid file names +// and not contain exclamation marks. +func DecodeVersion(encoding string) (v string, err error) { + v, ok := decodeString(encoding) + if !ok { + return "", fmt.Errorf("invalid version encoding %q", encoding) + } + if err := checkElem(v, true); err != nil { + return "", fmt.Errorf("disallowed version string %q", v) + } + return v, nil +} + +func decodeString(encoding string) (string, bool) { + var buf []byte + + bang := false + for _, r := range encoding { + if r >= utf8.RuneSelf { + return "", false + } + if bang { + bang = false + if r < 'a' || 'z' < r { + return "", false + } + buf = append(buf, byte(r+'A'-'a')) + continue + } + if r == '!' { + bang = true + continue + } + if 'A' <= r && r <= 'Z' { + return "", false + } + buf = append(buf, byte(r)) + } + if bang { + return "", false + } + return string(buf), true +} diff --git a/vendor/github.com/rogpeppe/go-internal/semver/semver.go b/vendor/github.com/rogpeppe/go-internal/semver/semver.go new file mode 100644 index 0000000000..4af7118e55 --- /dev/null +++ b/vendor/github.com/rogpeppe/go-internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. 
+// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// according to semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. 
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 0000000000..298f0e2665 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 0000000000..f5b5e127cd --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. 
+type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 0000000000..616ff8ff74 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. 
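+//
+// A minimal usage sketch (illustrative only, not part of the upstream afero
+// source; the sandbox path and file name are made up):
+//
+//	sandbox := NewBasePathFs(NewOsFs(), "/tmp/sandbox")
+//	f, err := sandbox.Create("notes.txt") // lands in /tmp/sandbox/notes.txt on the real filesystem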
+type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. + if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return 
&os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 0000000000..29a26c67dd --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. 
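+//
+// A minimal usage sketch (illustrative only, not part of the upstream afero
+// source; the file name is made up):
+//
+//	base := NewOsFs()
+//	layer := NewMemMapFs()
+//	cached := NewCacheOnReadFs(base, layer, 10*time.Second)
+//	data, err := ReadFile(cached, "config.yaml") // first read copies the file into the memory layer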
+type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + 
case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 0000000000..5728243d96 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 0000000000..968fc2783e --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 0000000000..e8108a851e --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
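+//
+// A minimal usage sketch (illustrative only, not part of the upstream afero
+// source; the file name and contents are made up):
+//
+//	union := NewCopyOnWriteFs(NewOsFs(), NewMemMapFs())
+//	err := WriteFile(union, "app.conf", []byte("debug=true"), 0644) // written to the overlay; the base is untouched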
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 0000000000..c42193688c --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 0000000000..5c3a3d8fff --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go new file mode 100644 index 0000000000..89c1bfc0a7 --- /dev/null +++ b/vendor/github.com/spf13/afero/lstater.go @@ -0,0 +1,27 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" +) + +// Lstater is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// Else it will call Stat. +// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +type Lstater interface { + LstatIfPossible(name string) (os.FileInfo, bool, error) +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 0000000000..c18a87fb71 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // Lstat not supported by a ll filesystems. + if _, err = lstatIfPossible(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 0000000000..e104013f45 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 0000000000..03a57ee5b5 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 0000000000..7af2fb56ff --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 0000000000..09498e70fb --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 0000000000..13cc1b84c9 --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 0000000000..18f60a0f6b --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 0000000000..c6376ec373 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 0000000000..9d92dbc051 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 0000000000..eda96312df --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,320 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+ } + + if c <= 0 && len(f.files) == 0 { + return f.files, nil + } + + if f.off >= len(f.files) { + return nil, io.EOF + } + + if c <= 0 { + return f.files[f.off:], nil + } + + if c > len(f.files) { + c = len(f.files) + } + + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 0000000000..4f253f481e --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. 
+const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. +func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. 
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslice), nil +} + +func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { + return FileContainsAnyBytes(a.Fs, filename, subslices) +} + +// Check if a file contains any of the specified byte slices. +func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslices...), nil +} + +// readerContains reports whether any of the subslices is within r. +func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. 
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go index 0517ec6a84..2a6b02dccd 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiextensions.k8s.io // Package apiextensions is the internal version of the API. -// +groupName=apiextensions.k8s.io package apiextensions // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 6fc75154fa..fcbd8bd197 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -20,6 +20,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. + WebhookConverter ConversionStrategyType = "Webhook" +) + // CustomResourceDefinitionSpec describes how a user wants their resource to appear type CustomResourceDefinitionSpec struct { // Group is the group this resource belongs in @@ -34,8 +44,14 @@ type CustomResourceDefinitionSpec struct { // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced Scope ResourceScope // Validation describes the validation methods for CustomResources + // Optional, the global validation schema for all versions. + // Top-level and per-version schemas are mutually exclusive. + // +optional Validation *CustomResourceValidation - // Subresources describes the subresources for CustomResources + // Subresources describes the subresources for CustomResource + // Optional, the global subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. + // +optional Subresources *CustomResourceSubresources // Versions is the list of all supported versions for this resource. // If Version field is provided, this field is optional. @@ -50,9 +66,90 @@ type CustomResourceDefinitionSpec struct { // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. Versions []CustomResourceDefinitionVersion // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Optional, the global columns for all versions. + // Top-level and per-version columns are mutually exclusive. + // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition + + // `conversion` defines conversion settings for the CRD. 
+ Conversion *CustomResourceConversion } +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // `strategy` specifies the conversion strategy. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + Strategy ConversionStrategyType + + // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. + WebhookClientConfig *WebhookClientConfig +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook. It has the same field as admissionregistration.internal.WebhookClientConfig. +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string + // `name` is the name of the service. + // Required + Name string + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string +} + +// CustomResourceDefinitionVersion describes a version for CRD. type CustomResourceDefinitionVersion struct { // Name is the version name, e.g. “v1”, “v2beta1”, etc. Name string @@ -61,6 +158,27 @@ type CustomResourceDefinitionVersion struct { // Storage flags the version as storage version. There must be exactly one flagged // as storage version. Storage bool + // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. 
+ // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Schema *CustomResourceValidation + // Subresources describes the subresources for CustomResource + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Subresources *CustomResourceSubresources + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an + // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must + // be explicitly set to null + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition } // CustomResourceColumnDefinition specifies a column for server side printing. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index e3235e8702..5aae97cf1a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -19,12 +19,9 @@ package v1beta1 import ( "strings" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) -var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() - func addDefaultingFuncs(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) // TODO figure out why I can't seem to get my defaulter generated @@ -66,9 +63,19 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) if len(obj.Version) == 0 && len(obj.Versions) != 0 { obj.Version = obj.Versions[0].Name } - if len(obj.AdditionalPrinterColumns) == 0 { - obj.AdditionalPrinterColumns = []CustomResourceColumnDefinition{ - {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, + if obj.Conversion == nil { + obj.Conversion = &CustomResourceConversion{ + Strategy: NoneConverter, + } + } +} + +// hasPerVersionColumns returns true if a CRD uses per-version columns. 
+func hasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if len(v.AdditionalPrinterColumns) > 0 { + return true } } + return false } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go index 50ab2b54c6..acd09aca28 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -17,8 +17,8 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io // Package v1beta1 is the v1beta1 version of the API. -// +groupName=apiextensions.k8s.io -// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index 005f4bc4d3..90bae839e1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto -// DO NOT EDIT! /* Package v1beta1 is a generated protocol buffer package. @@ -25,7 +24,11 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto It has these top-level messages: + ConversionRequest + ConversionResponse + ConversionReview CustomResourceColumnDefinition + CustomResourceConversion CustomResourceDefinition CustomResourceDefinitionCondition CustomResourceDefinitionList @@ -43,6 +46,8 @@ limitations under the License. JSONSchemaPropsOrArray JSONSchemaPropsOrBool JSONSchemaPropsOrStringArray + ServiceReference + WebhookClientConfig */ package v1beta1 @@ -50,7 +55,12 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import encoding_binary "encoding/binary" import strings "strings" import reflect "reflect" @@ -68,106 +78,136 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } +func (*ConversionRequest) ProtoMessage() {} +func (*ConversionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } +func (*ConversionResponse) ProtoMessage() {} +func (*ConversionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ConversionReview) Reset() { *m = ConversionReview{} } +func (*ConversionReview) ProtoMessage() {} +func (*ConversionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } func (*CustomResourceColumnDefinition) ProtoMessage() {} func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptorGenerated, []int{3} +} + +func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } +func (*CustomResourceConversion) ProtoMessage() {} +func (*CustomResourceConversion) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} } func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{5} } func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{6} } func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptorGenerated, []int{7} } func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptorGenerated, []int{8} } func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{9} } func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{10} } func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptorGenerated, []int{11} } func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func 
(*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{12} } func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{13} } func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{14} } func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} + return fileDescriptorGenerated, []int{15} } func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *JSON) Reset() { *m = JSON{} } func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{17} + return fileDescriptorGenerated, []int{21} } +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + func init() { + proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") + proto.RegisterType((*ConversionResponse)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionResponse") + proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionReview") proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition") + proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceConversion") proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") @@ -185,7 +225,127 @@ func init() { proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") +} +func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) + i += copy(dAtA[i:], m.DesiredAPIVersion) + if len(m.Objects) > 0 { + for _, msg := range m.Objects { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.ConvertedObjects) > 0 { + for _, msg := range m.ConvertedObjects { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n1, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *ConversionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) + n2, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Response != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) + n3, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil } + func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -227,6 +387,38 @@ func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i += copy(dAtA[i:], m.Strategy) + if m.WebhookClientConfig != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.WebhookClientConfig.Size())) + n4, err := m.WebhookClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -245,27 +437,27 @@ func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n1 + i += n5 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) + n6, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n6 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) + n7, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n7 return i, nil } @@ -295,11 +487,11 @@ func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n4, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n8 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -329,11 +521,11 @@ func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n9 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -439,11 +631,11 @@ func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Names.Size())) - n6, err := m.Names.MarshalTo(dAtA[i:]) + n10, err := m.Names.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n10 dAtA[i] = 0x22 i++ i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) @@ -452,21 +644,21 @@ func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Validation.Size())) - n7, err := m.Validation.MarshalTo(dAtA[i:]) + n11, err := m.Validation.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n11 } if m.Subresources != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) - n8, err := m.Subresources.MarshalTo(dAtA[i:]) + n12, err := m.Subresources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n12 } if len(m.Versions) > 0 { for _, msg := range m.Versions { @@ -492,6 +684,16 @@ func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.Conversion != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Conversion.Size())) + n13, err := m.Conversion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } return i, nil } @@ -525,11 +727,11 @@ func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AcceptedNames.Size())) - n9, err := m.AcceptedNames.MarshalTo(dAtA[i:]) + n14, err := m.AcceptedNames.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n14 if len(m.StoredVersions) > 0 { for _, s := range m.StoredVersions { dAtA[i] = 0x1a @@ -583,6 +785,38 @@ func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0 } i++ + if m.Schema != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) + n15, err := m.Schema.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.Subresources != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Subresources.Size())) + n16, err := m.Subresources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, msg := range m.AdditionalPrinterColumns { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -655,21 +889,21 @@ func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n10, err := m.Status.MarshalTo(dAtA[i:]) + n17, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n17 } if m.Scale != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Scale.Size())) - n11, err := m.Scale.MarshalTo(dAtA[i:]) + n18, err := m.Scale.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n18 } return i, nil } @@ -693,11 +927,11 @@ func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.OpenAPIV3Schema.Size())) - n12, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) + n19, err := m.OpenAPIV3Schema.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n19 } return i, nil } @@ -801,16 +1035,17 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Default.Size())) - n13, err := m.Default.MarshalTo(dAtA[i:]) + n20, err := m.Default.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n20 } if m.Maximum != 
nil { dAtA[i] = 0x49 i++ - i = encodeFixed64Generated(dAtA, i, uint64(math.Float64bits(float64(*m.Maximum)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) + i += 8 } dAtA[i] = 0x50 i++ @@ -823,7 +1058,8 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { if m.Minimum != nil { dAtA[i] = 0x59 i++ - i = encodeFixed64Generated(dAtA, i, uint64(math.Float64bits(float64(*m.Minimum)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) + i += 8 } dAtA[i] = 0x60 i++ @@ -876,7 +1112,8 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { i++ dAtA[i] = 0x1 i++ - i = encodeFixed64Generated(dAtA, i, uint64(math.Float64bits(float64(*m.MultipleOf)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) + i += 8 } if len(m.Enum) > 0 { for _, msg := range m.Enum { @@ -929,11 +1166,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Items.Size())) - n14, err := m.Items.MarshalTo(dAtA[i:]) + n21, err := m.Items.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n21 } if len(m.AllOf) > 0 { for _, msg := range m.AllOf { @@ -983,11 +1220,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Not.Size())) - n15, err := m.Not.MarshalTo(dAtA[i:]) + n22, err := m.Not.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n22 } if len(m.Properties) > 0 { keysForProperties := make([]string, 0, len(m.Properties)) @@ -1015,11 +1252,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n16, err := (&v).MarshalTo(dAtA[i:]) + n23, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n23 } } if m.AdditionalProperties != nil { @@ -1028,11 +1265,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalProperties.Size())) - n17, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) + n24, err := m.AdditionalProperties.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n24 } if len(m.PatternProperties) > 0 { keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) @@ -1060,11 +1297,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n18, err := (&v).MarshalTo(dAtA[i:]) + n25, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n25 } } if len(m.Dependencies) > 0 { @@ -1093,11 +1330,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n19, err := (&v).MarshalTo(dAtA[i:]) + n26, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n26 } } if m.AdditionalItems != nil { @@ -1106,11 +1343,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AdditionalItems.Size())) - n20, err := m.AdditionalItems.MarshalTo(dAtA[i:]) + n27, err := m.AdditionalItems.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n27 } if len(m.Definitions) > 0 { keysForDefinitions := make([]string, 0, len(m.Definitions)) @@ -1138,11 
+1375,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n21, err := (&v).MarshalTo(dAtA[i:]) + n28, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n28 } } if m.ExternalDocs != nil { @@ -1151,11 +1388,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ExternalDocs.Size())) - n22, err := m.ExternalDocs.MarshalTo(dAtA[i:]) + n29, err := m.ExternalDocs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n29 } if m.Example != nil { dAtA[i] = 0xa2 @@ -1163,11 +1400,11 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Example.Size())) - n23, err := m.Example.MarshalTo(dAtA[i:]) + n30, err := m.Example.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n30 } dAtA[i] = 0xa8 i++ @@ -1201,11 +1438,11 @@ func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n24, err := m.Schema.MarshalTo(dAtA[i:]) + n31, err := m.Schema.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n31 } if len(m.JSONSchemas) > 0 { for _, msg := range m.JSONSchemas { @@ -1249,11 +1486,11 @@ func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n25, err := m.Schema.MarshalTo(dAtA[i:]) + n32, err := m.Schema.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n32 } return i, nil } @@ -1277,11 +1514,11 @@ func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Schema.Size())) - n26, err := m.Schema.MarshalTo(dAtA[i:]) + n33, err := m.Schema.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n33 } if len(m.Property) > 0 { for _, s := range m.Property { @@ -1301,24 +1538,78 @@ func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n34, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.CABundle != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + return i, nil } + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1328,14 +1619,60 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *CustomResourceColumnDefinition) Size() (n int) { +func (m *ConversionRequest) Size() (n int) { var l int _ = l - l = len(m.Name) + l = len(m.UID) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) + l = len(m.DesiredAPIVersion) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Format) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConversionResponse) Size() (n int) { + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ConvertedObjects) > 0 { + for _, e := range m.ConvertedObjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConversionReview) Size() (n int) { + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomResourceColumnDefinition) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) n += 1 + l + sovGenerated(uint64(l)) l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) @@ -1345,6 +1682,18 @@ func (m *CustomResourceColumnDefinition) Size() (n int) { return n } +func (m *CustomResourceConversion) Size() (n int) { + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + if m.WebhookClientConfig != nil { + l = m.WebhookClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *CustomResourceDefinition) Size() (n int) { var l int _ = l @@ -1444,6 +1793,10 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Conversion != nil { + l = m.Conversion.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1474,6 +1827,20 @@ func (m *CustomResourceDefinitionVersion) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) n += 2 n += 2 + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Subresources != nil { + l = m.Subresources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1733,6 +2100,38 @@ func (m 
*JSONSchemaPropsOrStringArray) Size() (n int) { return n } +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -1746,6 +2145,41 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ConversionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, + `Objects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Objects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConversionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ConvertedObjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConvertedObjects), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Result:` + strings.Replace(strings.Replace(this.Result.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConversionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConversionReview{`, + `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "ConversionRequest", "ConversionRequest", 1) + `,`, + `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "ConversionResponse", "ConversionResponse", 1) + `,`, + `}`, + }, "") + return s +} func (this *CustomResourceColumnDefinition) String() string { if this == nil { return "nil" @@ -1761,6 +2195,17 @@ func (this *CustomResourceColumnDefinition) String() string { }, "") return s } +func (this *CustomResourceConversion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceConversion{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `WebhookClientConfig:` + strings.Replace(fmt.Sprintf("%v", this.WebhookClientConfig), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `}`, + }, "") + return s +} func (this *CustomResourceDefinition) String() string { if this == nil { return "nil" @@ -1826,6 +2271,7 @@ func (this *CustomResourceDefinitionSpec) String() string { `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 
1), `&`, ``, 1) + `,`, + `Conversion:` + strings.Replace(fmt.Sprintf("%v", this.Conversion), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, `}`, }, "") return s @@ -1850,6 +2296,9 @@ func (this *CustomResourceDefinitionVersion) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Served:` + fmt.Sprintf("%v", this.Served) + `,`, `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, + `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, + `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2036,6 +2485,30 @@ func (this *JSONSchemaPropsOrStringArray) String() string { }, "") return s } +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2044,7 +2517,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { +func (m *ConversionRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2067,15 +2540,15 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2100,11 +2573,11 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2129,42 +2602,13 @@ func (m 
*CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Format = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2174,68 +2618,22 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - m.Priority = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Priority |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Objects = append(m.Objects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.JSONPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2258,7 +2656,7 @@ func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { +func (m *ConversionResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2281,17 +2679,17 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - 
return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2301,25 +2699,24 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2343,13 +2740,14 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ConvertedObjects = append(m.ConvertedObjects, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2373,7 +2771,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2398,7 +2796,7 @@ func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { +func (m *ConversionReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2421,17 +2819,17 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ 
-2441,221 +2839,28 @@ func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.Request == nil { + m.Request = &ConversionRequest{} } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2679,8 +2884,10 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, CustomResourceDefinition{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Response == nil { + m.Response = &ConversionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2705,7 +2912,7 @@ func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2728,15 +2935,15 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2761,11 +2968,11 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Plural = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2790,11 +2997,11 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Singular = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2819,11 +3026,11 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2848,13 +3055,13 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) } - var stringLen uint64 + m.Priority = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2864,24 +3071,14 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Priority |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ListKind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2906,7 +3103,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + m.JSONPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2929,7 +3126,7 @@ func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { +func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2952,15 +3149,15 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomResourceConversion: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2985,13 +3182,13 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WebhookClientConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3001,116 +3198,78 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + if m.WebhookClientConfig == nil { + m.WebhookClientConfig = &WebhookClientConfig{} } - if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.WebhookClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.Validation == nil { - m.Validation = &CustomResourceValidation{} - } - if err := m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 6: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3134,16 +3293,13 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Subresources == nil { - m.Subresources = &CustomResourceSubresources{} - } - if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3167,14 +3323,13 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3198,8 +3353,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) - if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3224,7 +3378,7 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { +func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3247,17 +3401,17 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group 
for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3267,26 +3421,53 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3310,13 +3491,13 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3341,7 +3522,36 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3364,7 +3574,7 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { +func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3387,17 +3597,17 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3407,23 +3617,826 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomResourceDefinition{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plural = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Singular = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListKind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validation == nil { + m.Validation = &CustomResourceValidation{} + } + if err := m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Conversion == nil { + m.Conversion = &CustomResourceConversion{} + } + if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) } var v int @@ -3462,6 +4475,103 @@ func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { } } m.Storage = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &CustomResourceValidation{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Subresources == nil { + m.Subresources = &CustomResourceSubresources{} + } + if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4333,15 +5443,8 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 56 v2 := float64(math.Float64frombits(v)) m.Maximum = &v2 case 10: @@ -4372,15 +5475,8 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 56 v2 := float64(math.Float64frombits(v)) m.Minimum = &v2 case 12: @@ -4540,15 +5636,8 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 56 v2 := float64(math.Float64frombits(v)) m.MultipleOf = &v2 case 20: @@ -4814,44 +5903,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4861,26 +5913,26 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Properties == nil { m.Properties = make(map[string]JSONSchemaProps) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4890,46 +5942,85 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Properties[mapkey] = *mapvalue - } else { - var mapvalue JSONSchemaProps - m.Properties[mapkey] = mapvalue } + m.Properties[mapkey] = *mapvalue iNdEx = postIndex case 30: if wireType != 2 { @@ -4990,22 +6081,108 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.PatternProperties == nil { + m.PatternProperties = make(map[string]JSONSchemaProps) + } + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - var stringLenmapkey uint64 + m.PatternProperties[mapkey] = *mapvalue + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5015,26 +6192,26 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.PatternProperties == nil { - m.PatternProperties = make(map[string]JSONSchemaProps) + if m.Dependencies == nil { + m.Dependencies = make(JSONSchemaDependencies) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &JSONSchemaPropsOrStringArray{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if 
shift >= 64 { return ErrIntOverflowGenerated @@ -5044,50 +6221,89 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaPropsOrStringArray{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.PatternProperties[mapkey] = *mapvalue - } else { - var mapvalue JSONSchemaProps - m.PatternProperties[mapkey] = mapvalue } + m.Dependencies[mapkey] = *mapvalue iNdEx = postIndex - case 32: + case 33: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5111,22 +6327,18 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.AdditionalItems == nil { + m.AdditionalItems = &JSONSchemaPropsOrBool{} + } + if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", 
wireType) } - var stringLenmapkey uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5136,26 +6348,26 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Dependencies == nil { - m.Dependencies = make(JSONSchemaDependencies) + if m.Definitions == nil { + m.Definitions = make(JSONSchemaDefinitions) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + mapvalue := &JSONSchemaProps{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5165,50 +6377,89 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &JSONSchemaProps{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &JSONSchemaPropsOrStringArray{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Dependencies[mapkey] = *mapvalue - } 
else { - var mapvalue JSONSchemaPropsOrStringArray - m.Dependencies[mapkey] = mapvalue } + m.Definitions[mapkey] = *mapvalue iNdEx = postIndex - case 33: + case 35: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5232,16 +6483,16 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AdditionalItems == nil { - m.AdditionalItems = &JSONSchemaPropsOrBool{} + if m.ExternalDocs == nil { + m.ExternalDocs = &ExternalDocumentation{} } - if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 34: + case 36: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5265,22 +6516,18 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.Example == nil { + m.Example = &JSON{} + } + if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) } - var stringLenmapkey uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5290,79 +6537,65 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { + m.Nullable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Definitions == nil { - m.Definitions = make(JSONSchemaDefinitions) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &JSONSchemaProps{} - if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Definitions[mapkey] = *mapvalue - } else { - var mapvalue JSONSchemaProps - m.Definitions[mapkey] = mapvalue + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - iNdEx = postIndex - case 35: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5386,16 +6619,16 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExternalDocs == nil { - m.ExternalDocs = &ExternalDocumentation{} + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} } - if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 36: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5419,16 +6652,64 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Example == nil { - m.Example = &JSON{} - } - if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) + if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 37: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nullable", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -5445,7 +6726,40 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { break } } - m.Nullable = bool(v != 0) + m.Allows = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &JSONSchemaProps{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5467,7 +6781,7 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { } return nil } -func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { +func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5490,10 +6804,10 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -5531,9 +6845,9 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5543,22 +6857,20 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) - if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -5581,7 +6893,7 @@ func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { } return nil } -func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5604,17 +6916,17 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") 
+ return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5624,17 +6936,26 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Allows = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5644,24 +6965,50 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5684,7 +7031,7 @@ func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { } return nil } -func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5707,15 +7054,15 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d 
(wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5739,16 +7086,47 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} + if m.Service == nil { + m.Service = &ServiceReference{} } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5773,7 +7151,8 @@ func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5906,150 +7285,178 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2314 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6f, 0x5b, 0x49, - 0x15, 0xef, 0xd8, 0x71, 0xe2, 0x8c, 0x93, 0x26, 0x99, 0x6d, 0xca, 0x6d, 0x68, 0xed, 0xd4, 0xa5, - 0xab, 0x00, 0xad, 0x43, 0xcb, 0x2e, 0xbb, 0xac, 0xc4, 0x43, 0x9c, 0x14, 0xd4, 0xa5, 0x69, 0xa2, - 0x71, 0x5b, 0x04, 0xfb, 0x39, 0xb1, 0x27, 0xce, 0x6d, 0xee, 0x57, 0xef, 0xcc, 0x75, 0x13, 0x09, - 0x10, 0x1f, 0x5a, 0x21, 0x21, 0x60, 0x81, 0xad, 0x90, 0x90, 0x78, 0x01, 0x89, 0x17, 0x1e, 0xe0, - 0x01, 0xde, 0xe0, 0x0f, 0xe8, 0xe3, 0x3e, 0xae, 0x78, 0xb0, 0xa8, 0xf9, 0x17, 0x90, 0x90, 0xf2, - 0x84, 0xe6, 0xe3, 0xce, 0xfd, 0x70, 0xb2, 0xad, 0x58, 0x7b, 0xfb, 0x66, 0x9f, 0x73, 0xe6, 0xfc, - 0x7e, 0x73, 0xe6, 0xcc, 0x99, 0x73, 0x6c, 0xb8, 0xbb, 0xff, 0x2a, 0x6b, 0xd8, 0xfe, 0xea, 0x7e, - 0xb4, 0x43, 0x43, 0x8f, 0x72, 0xca, 0x56, 0x7b, 0xd4, 0xeb, 0xf8, 0xe1, 0xaa, 0x56, 0x90, 0xc0, - 0xa6, 0x07, 0x9c, 0x7a, 0xcc, 0xf6, 0x3d, 0x76, 0x95, 0x04, 0x36, 0xa3, 0x61, 0x8f, 0x86, 0xab, - 0xc1, 0x7e, 0x57, 0xe8, 0x58, 0xd6, 0x60, 0xb5, 0x77, 0x6d, 0x87, 0x72, 0x72, 0x6d, 0xb5, 0x4b, - 0x3d, 0x1a, 0x12, 0x4e, 0x3b, 0x8d, 0x20, 0xf4, 0xb9, 0x8f, 0xbe, 0xa6, 0xdc, 0x35, 0x32, 0xd6, - 0xef, 0x18, 0x77, 0x8d, 0x60, 0xbf, 0x2b, 0x74, 0x2c, 0x6b, 0xd0, 0xd0, 0xee, 0x96, 0xae, 0x76, - 0x6d, 0xbe, 0x17, 0xed, 0x34, 0xda, 0xbe, 0xbb, 0xda, 0xf5, 0xbb, 0xfe, 0xaa, 0xf4, 0xba, 0x13, - 0xed, 0xca, 0x6f, 0xf2, 0x8b, 0xfc, 0xa4, 0xd0, 0x96, 0x5e, 0x4a, 0xc8, 0xbb, 0xa4, 0xbd, 0x67, - 0x7b, 0x34, 0x3c, 0x4c, 0x18, 0xbb, 0x94, 
0x93, 0xd5, 0xde, 0x10, 0xc7, 0xa5, 0xd5, 0x93, 0x56, - 0x85, 0x91, 0xc7, 0x6d, 0x97, 0x0e, 0x2d, 0xf8, 0xca, 0xd3, 0x16, 0xb0, 0xf6, 0x1e, 0x75, 0x49, - 0x7e, 0x5d, 0xfd, 0x83, 0x02, 0xac, 0xae, 0x47, 0x8c, 0xfb, 0x2e, 0xa6, 0xcc, 0x8f, 0xc2, 0x36, - 0x5d, 0xf7, 0x9d, 0xc8, 0xf5, 0x36, 0xe8, 0xae, 0xed, 0xd9, 0xdc, 0xf6, 0x3d, 0xb4, 0x0c, 0x27, - 0x3c, 0xe2, 0x52, 0x0b, 0x2c, 0x83, 0x95, 0xe9, 0xe6, 0xcc, 0xe3, 0x7e, 0xed, 0xd4, 0xa0, 0x5f, - 0x9b, 0xb8, 0x4d, 0x5c, 0x8a, 0xa5, 0x46, 0x58, 0xf0, 0xc3, 0x80, 0x5a, 0x85, 0xac, 0xc5, 0x9d, - 0xc3, 0x80, 0x62, 0xa9, 0x41, 0x2f, 0xc2, 0xc9, 0x5d, 0x3f, 0x74, 0x09, 0xb7, 0x8a, 0xd2, 0xe6, - 0xb4, 0xb6, 0x99, 0xfc, 0xba, 0x94, 0x62, 0xad, 0x45, 0x2f, 0xc3, 0x4a, 0x87, 0xb2, 0x76, 0x68, - 0x07, 0x02, 0xda, 0x9a, 0x90, 0xc6, 0x2f, 0x68, 0xe3, 0xca, 0x46, 0xa2, 0xc2, 0x69, 0x3b, 0x74, - 0x05, 0x96, 0x83, 0xd0, 0xf6, 0x43, 0x9b, 0x1f, 0x5a, 0xa5, 0x65, 0xb0, 0x52, 0x6a, 0xce, 0xeb, - 0x35, 0xe5, 0x6d, 0x2d, 0xc7, 0xc6, 0x02, 0x2d, 0xc3, 0xf2, 0xeb, 0xad, 0xad, 0xdb, 0xdb, 0x84, - 0xef, 0x59, 0x93, 0x12, 0x61, 0x42, 0x58, 0xe3, 0xf2, 0x7d, 0x2d, 0xad, 0xff, 0xb8, 0x08, 0xad, - 0x6c, 0x54, 0x52, 0xf1, 0x78, 0x17, 0x96, 0xc5, 0xb1, 0x75, 0x08, 0x27, 0x32, 0x26, 0x95, 0xeb, - 0x5f, 0x6a, 0x24, 0x29, 0x65, 0xa2, 0x9f, 0xe4, 0x91, 0xb0, 0x6e, 0xf4, 0xae, 0x35, 0xb6, 0x76, - 0xee, 0xd3, 0x36, 0xdf, 0xa4, 0x9c, 0x34, 0x91, 0xa6, 0x07, 0x13, 0x19, 0x36, 0x5e, 0xd1, 0xf7, - 0xe0, 0x04, 0x0b, 0x68, 0x5b, 0xc6, 0xb3, 0x72, 0xfd, 0x8d, 0xc6, 0x27, 0x4a, 0xd8, 0xc6, 0x49, - 0x1b, 0x69, 0x05, 0xb4, 0x9d, 0x1c, 0x96, 0xf8, 0x86, 0x25, 0x2c, 0x7a, 0x0f, 0xc0, 0x49, 0xc6, - 0x09, 0x8f, 0x98, 0x3c, 0xad, 0xca, 0xf5, 0xb7, 0xc6, 0xc5, 0x40, 0x82, 0x24, 0xc9, 0xa0, 0xbe, - 0x63, 0x0d, 0x5e, 0xff, 0x4f, 0x01, 0x5e, 0x3c, 0x69, 0xe9, 0xba, 0xef, 0x75, 0xd4, 0x71, 0xdc, - 0xd4, 0xc9, 0xa7, 0xd2, 0xf3, 0xe5, 0x74, 0xf2, 0x1d, 0xf5, 0x6b, 0x97, 0x9f, 0xea, 0x20, 0x95, - 0xa5, 0x5f, 0x35, 0xfb, 0x56, 0x99, 0x7c, 0x31, 0x4b, 0xec, 0xa8, 0x5f, 0x9b, 0x33, 0xcb, 0xb2, - 0x5c, 0x51, 0x0f, 0x22, 0x87, 0x30, 0x7e, 0x27, 0x24, 0x1e, 0x53, 0x6e, 0x6d, 0x97, 0xea, 0xf0, - 0x7d, 0xe1, 0xd9, 0xd2, 0x43, 0xac, 0x68, 0x2e, 0x69, 0x48, 0x74, 0x6b, 0xc8, 0x1b, 0x3e, 0x06, - 0x41, 0x5c, 0xac, 0x90, 0x12, 0x66, 0xee, 0x8a, 0x89, 0x25, 0x96, 0x52, 0xac, 0xb5, 0xe8, 0xf3, - 0x70, 0xca, 0xa5, 0x8c, 0x91, 0x2e, 0x95, 0x17, 0x64, 0xba, 0x39, 0xa7, 0x0d, 0xa7, 0x36, 0x95, - 0x18, 0xc7, 0xfa, 0xfa, 0x11, 0x80, 0xe7, 0x4f, 0x8a, 0xda, 0x2d, 0x9b, 0x71, 0xf4, 0xe6, 0xd0, - 0x05, 0x68, 0x3c, 0xdb, 0x0e, 0xc5, 0x6a, 0x99, 0xfe, 0xe6, 0x76, 0xc6, 0x92, 0x54, 0xf2, 0x7f, - 0x17, 0x96, 0x6c, 0x4e, 0x5d, 0x71, 0x06, 0xc5, 0x95, 0xca, 0xf5, 0x6f, 0x8d, 0x29, 0xf7, 0x9a, - 0xb3, 0x9a, 0x43, 0xe9, 0xa6, 0x40, 0xc3, 0x0a, 0xb4, 0xfe, 0xc7, 0x02, 0xbc, 0x70, 0xd2, 0x12, - 0x51, 0xf1, 0x98, 0x88, 0x78, 0xe0, 0x44, 0x21, 0x71, 0x74, 0xc6, 0x99, 0x88, 0x6f, 0x4b, 0x29, - 0xd6, 0x5a, 0x51, 0x93, 0x98, 0xed, 0x75, 0x23, 0x87, 0x84, 0x3a, 0x9d, 0xcc, 0xae, 0x5b, 0x5a, - 0x8e, 0x8d, 0x05, 0x6a, 0x40, 0xc8, 0xf6, 0xfc, 0x90, 0x4b, 0x0c, 0xab, 0xb8, 0x5c, 0x14, 0x9e, - 0x45, 0x81, 0x68, 0x19, 0x29, 0x4e, 0x59, 0x88, 0x92, 0xbb, 0x6f, 0x7b, 0x1d, 0x7d, 0xea, 0xe6, - 0x16, 0x7f, 0xd3, 0xf6, 0x3a, 0x58, 0x6a, 0x04, 0xbe, 0x63, 0x33, 0x2e, 0x24, 0xfa, 0xc8, 0x33, - 0x51, 0x97, 0x96, 0xc6, 0x42, 0xe0, 0xb7, 0x09, 0xa7, 0x5d, 0x3f, 0xb4, 0x29, 0xb3, 0x26, 0x13, - 0xfc, 0x75, 0x23, 0xc5, 0x29, 0x8b, 0xfa, 0xaf, 0xa6, 0x4e, 0x4e, 0x12, 0x51, 0x4a, 0xd0, 0x25, - 0x58, 0xea, 0x86, 0x7e, 0x14, 0xe8, 0x28, 0x99, 0x68, 0x7f, 0x43, 
0x08, 0xb1, 0xd2, 0x89, 0xac, - 0xec, 0xd1, 0x50, 0x1c, 0x98, 0x0e, 0x91, 0xc9, 0xca, 0x7b, 0x4a, 0x8c, 0x63, 0x3d, 0xfa, 0x21, - 0x80, 0x25, 0x4f, 0x07, 0x47, 0xa4, 0xdc, 0x9b, 0x63, 0xca, 0x0b, 0x19, 0xde, 0x84, 0xae, 0x8a, - 0xbc, 0x42, 0x46, 0x2f, 0xc1, 0x12, 0x6b, 0xfb, 0x01, 0xd5, 0x51, 0xaf, 0xc6, 0x46, 0x2d, 0x21, - 0x3c, 0xea, 0xd7, 0x66, 0x63, 0x77, 0x52, 0x80, 0x95, 0x31, 0xfa, 0x09, 0x80, 0xb0, 0x47, 0x1c, - 0xbb, 0x43, 0xe4, 0x9b, 0x56, 0x92, 0xf4, 0x47, 0x9b, 0xd6, 0xf7, 0x8c, 0x7b, 0x75, 0x68, 0xc9, - 0x77, 0x9c, 0x82, 0x46, 0xef, 0x03, 0x38, 0xc3, 0xa2, 0x9d, 0x50, 0xaf, 0x62, 0xf2, 0xf5, 0xab, - 0x5c, 0xff, 0xf6, 0x48, 0xb9, 0xb4, 0x52, 0x00, 0xcd, 0xf9, 0x41, 0xbf, 0x36, 0x93, 0x96, 0xe0, - 0x0c, 0x01, 0xf4, 0x33, 0x00, 0xcb, 0xfa, 0x84, 0x99, 0x35, 0x25, 0x2f, 0xfc, 0xdb, 0x63, 0x3a, - 0x58, 0x9d, 0x51, 0xc9, 0x2d, 0xd0, 0x02, 0x86, 0x0d, 0x03, 0xf4, 0x77, 0x00, 0x2d, 0xd2, 0x51, - 0x05, 0x9e, 0x38, 0xdb, 0xa1, 0xed, 0x71, 0x1a, 0xaa, 0x86, 0x88, 0x59, 0x65, 0x49, 0x6f, 0xb4, - 0x6f, 0x61, 0xbe, 0xd9, 0x6a, 0x2e, 0x6b, 0x76, 0xd6, 0xda, 0x09, 0x34, 0xf0, 0x89, 0x04, 0xeb, - 0xef, 0x17, 0xf3, 0xbd, 0x5c, 0xfe, 0xa9, 0x45, 0x8f, 0x00, 0x84, 0xed, 0xf8, 0x09, 0x63, 0x16, - 0x90, 0x5b, 0x7a, 0x77, 0x4c, 0x11, 0x37, 0x6f, 0x65, 0xd2, 0xee, 0x18, 0x91, 0xa8, 0x26, 0xe6, - 0x33, 0xfa, 0x2d, 0x80, 0xb3, 0xa4, 0xdd, 0xa6, 0x01, 0xa7, 0x1d, 0x55, 0x01, 0x0b, 0x9f, 0xc2, - 0x25, 0x5f, 0xd4, 0xac, 0x66, 0xd7, 0xd2, 0xd0, 0x38, 0xcb, 0x04, 0xbd, 0x06, 0x4f, 0x33, 0xee, - 0x87, 0xb4, 0x13, 0xe7, 0x8b, 0xae, 0xce, 0x68, 0xd0, 0xaf, 0x9d, 0x6e, 0x65, 0x34, 0x38, 0x67, - 0x59, 0xff, 0x0d, 0x80, 0xb5, 0xa7, 0xe4, 0xe3, 0x33, 0xb4, 0xd7, 0x2f, 0xc2, 0x49, 0xb9, 0xdd, - 0x8e, 0x8c, 0x4a, 0x39, 0xd5, 0x2f, 0x49, 0x29, 0xd6, 0x5a, 0x51, 0x4d, 0x05, 0xbe, 0x78, 0xe3, - 0x8b, 0xd2, 0xd0, 0x54, 0xd3, 0x96, 0x12, 0xe3, 0x58, 0x5f, 0xff, 0x2f, 0xc8, 0xa7, 0x4a, 0xea, - 0x92, 0xb6, 0xda, 0xc4, 0xa1, 0x68, 0x03, 0xce, 0x8b, 0x6e, 0x10, 0xd3, 0xc0, 0xb1, 0xdb, 0x84, - 0xc9, 0x6e, 0x59, 0x71, 0xb4, 0xb4, 0xdb, 0xf9, 0x56, 0x4e, 0x8f, 0x87, 0x56, 0xa0, 0xd7, 0x21, - 0x52, 0x1d, 0x52, 0xc6, 0x8f, 0x2a, 0xf6, 0xa6, 0xd7, 0x69, 0x0d, 0x59, 0xe0, 0x63, 0x56, 0xa1, - 0x75, 0xb8, 0xe0, 0x90, 0x1d, 0xea, 0xb4, 0xa8, 0x43, 0xdb, 0xdc, 0x0f, 0xa5, 0x2b, 0x35, 0x4f, - 0x2c, 0x0e, 0xfa, 0xb5, 0x85, 0x5b, 0x79, 0x25, 0x1e, 0xb6, 0xaf, 0x5f, 0xcc, 0x9f, 0x48, 0x7a, - 0xe3, 0xaa, 0xef, 0xfc, 0x7d, 0x01, 0x2e, 0x9d, 0x5c, 0xd3, 0xd0, 0x8f, 0x92, 0xf6, 0x58, 0x75, - 0x3f, 0x6f, 0x8f, 0xab, 0x7e, 0xea, 0xfe, 0x18, 0x0e, 0xf7, 0xc6, 0xe8, 0xfb, 0xe2, 0x29, 0x22, - 0x0e, 0xd5, 0x17, 0xe5, 0xad, 0xb1, 0x51, 0x10, 0x20, 0xcd, 0x69, 0xf5, 0xca, 0x11, 0x47, 0x3e, - 0x6a, 0xc4, 0xa1, 0xf5, 0x3f, 0x81, 0xfc, 0x84, 0x94, 0xbc, 0x39, 0xe8, 0xe7, 0x00, 0xce, 0xf9, - 0x01, 0xf5, 0xd6, 0xb6, 0x6f, 0xde, 0xfb, 0x72, 0x4b, 0x0e, 0x9e, 0x3a, 0x54, 0xb7, 0x3f, 0x21, - 0x4f, 0x31, 0xb7, 0x29, 0x87, 0xdb, 0xa1, 0x1f, 0xb0, 0xe6, 0x0b, 0x83, 0x7e, 0x6d, 0x6e, 0x2b, - 0x0b, 0x85, 0xf3, 0xd8, 0x75, 0x17, 0x2e, 0xde, 0x38, 0xe0, 0x34, 0xf4, 0x88, 0xb3, 0xe1, 0xb7, - 0x23, 0x97, 0x7a, 0x5c, 0x11, 0xcd, 0x8d, 0x9b, 0xe0, 0x19, 0xc7, 0xcd, 0x0b, 0xb0, 0x18, 0x85, - 0x8e, 0xce, 0xe2, 0x8a, 0x36, 0x2f, 0xde, 0xc5, 0xb7, 0xb0, 0x90, 0xd7, 0x2f, 0xc2, 0x09, 0xc1, - 0x13, 0x9d, 0x83, 0xc5, 0x90, 0x3c, 0x94, 0x5e, 0x67, 0x9a, 0x53, 0xc2, 0x04, 0x93, 0x87, 0x58, - 0xc8, 0xea, 0xff, 0x3c, 0x0f, 0xe7, 0x72, 0x7b, 0x41, 0x4b, 0xb0, 0x60, 0x77, 0x34, 0x07, 0xa8, - 0x9d, 0x16, 0x6e, 0x6e, 0xe0, 0x82, 0xdd, 0x41, 0xaf, 0xc0, 0x49, 0x35, 0xc0, 0x6b, 0xd0, 
0x9a, - 0x29, 0x01, 0x52, 0x2a, 0x7a, 0x8f, 0xc4, 0x9d, 0x20, 0xa2, 0xcd, 0x25, 0x07, 0xba, 0xab, 0x6f, - 0x89, 0xe2, 0x40, 0x77, 0xb1, 0x90, 0xfd, 0xbf, 0xb3, 0x76, 0x3c, 0xec, 0x97, 0x9e, 0x61, 0xd8, - 0x9f, 0xfc, 0xd8, 0x61, 0xff, 0x12, 0x2c, 0x71, 0x9b, 0x3b, 0xd4, 0x9a, 0xca, 0xb6, 0x88, 0x77, - 0x84, 0x10, 0x2b, 0x1d, 0xba, 0x0f, 0xa7, 0x3a, 0x74, 0x97, 0x44, 0x0e, 0xb7, 0xca, 0x32, 0x85, - 0xd6, 0x47, 0x90, 0x42, 0xcd, 0x8a, 0xa8, 0x8a, 0x1b, 0xca, 0x2f, 0x8e, 0x01, 0xd0, 0x65, 0x38, - 0xe5, 0x92, 0x03, 0xdb, 0x8d, 0x5c, 0x6b, 0x7a, 0x19, 0xac, 0x00, 0x65, 0xb6, 0xa9, 0x44, 0x38, - 0xd6, 0x89, 0xca, 0x48, 0x0f, 0xda, 0x4e, 0xc4, 0xec, 0x1e, 0xd5, 0x4a, 0x0b, 0xca, 0x82, 0x6b, - 0x2a, 0xe3, 0x8d, 0x9c, 0x1e, 0x0f, 0xad, 0x90, 0x60, 0xb6, 0x27, 0x17, 0x57, 0x52, 0x60, 0x4a, - 0x84, 0x63, 0x5d, 0x16, 0x4c, 0xdb, 0xcf, 0x9c, 0x04, 0xa6, 0x17, 0x0f, 0xad, 0x40, 0x5f, 0x84, - 0xd3, 0x2e, 0x39, 0xb8, 0x45, 0xbd, 0x2e, 0xdf, 0xb3, 0x66, 0x97, 0xc1, 0x4a, 0xb1, 0x39, 0x3b, - 0xe8, 0xd7, 0xa6, 0x37, 0x63, 0x21, 0x4e, 0xf4, 0xd2, 0xd8, 0xf6, 0xb4, 0xf1, 0xe9, 0x94, 0x71, - 0x2c, 0xc4, 0x89, 0x5e, 0x3c, 0x3a, 0x01, 0xe1, 0xe2, 0x72, 0x59, 0x73, 0xd9, 0x16, 0x7e, 0x5b, - 0x89, 0x71, 0xac, 0x47, 0x2b, 0xb0, 0xec, 0x92, 0x03, 0x39, 0x6e, 0x59, 0xf3, 0xd2, 0xed, 0x8c, - 0xe8, 0xc3, 0x36, 0xb5, 0x0c, 0x1b, 0xad, 0xb4, 0xb4, 0x3d, 0x65, 0xb9, 0x90, 0xb2, 0xd4, 0x32, - 0x6c, 0xb4, 0x22, 0x89, 0x23, 0xcf, 0x7e, 0x10, 0x51, 0x65, 0x8c, 0x64, 0x64, 0x4c, 0x12, 0xdf, - 0x4d, 0x54, 0x38, 0x6d, 0x27, 0xc6, 0x1d, 0x37, 0x72, 0xb8, 0x1d, 0x38, 0x74, 0x6b, 0xd7, 0x7a, - 0x41, 0xc6, 0x5f, 0x76, 0xce, 0x9b, 0x46, 0x8a, 0x53, 0x16, 0x88, 0xc2, 0x09, 0xea, 0x45, 0xae, - 0x75, 0x46, 0x36, 0x4c, 0x23, 0x49, 0x41, 0x73, 0x73, 0x6e, 0x78, 0x91, 0x8b, 0xa5, 0x7b, 0xf4, - 0x0a, 0x9c, 0x75, 0xc9, 0x81, 0x28, 0x07, 0x34, 0xe4, 0x62, 0x10, 0x5b, 0x94, 0x9b, 0x5f, 0x10, - 0x4d, 0xca, 0x66, 0x5a, 0x81, 0xb3, 0x76, 0x72, 0xa1, 0xed, 0xa5, 0x16, 0x9e, 0x4d, 0x2d, 0x4c, - 0x2b, 0x70, 0xd6, 0x4e, 0x44, 0x3a, 0xa4, 0x0f, 0x22, 0x3b, 0xa4, 0x1d, 0xeb, 0x33, 0xb2, 0xaf, - 0x91, 0x91, 0xc6, 0x5a, 0x86, 0x8d, 0x16, 0xf5, 0xe2, 0xb9, 0xdc, 0x92, 0xd7, 0xf0, 0xee, 0x68, - 0x2b, 0xf9, 0x56, 0xb8, 0x16, 0x86, 0xe4, 0x50, 0xbd, 0x34, 0xe9, 0x89, 0x1c, 0x31, 0x58, 0x22, - 0x8e, 0xb3, 0xb5, 0x6b, 0x9d, 0x93, 0xb1, 0x1f, 0xf5, 0x0b, 0x62, 0xaa, 0xce, 0x9a, 0x00, 0xc1, - 0x0a, 0x4b, 0x80, 0xfa, 0x9e, 0x48, 0x8d, 0xa5, 0xf1, 0x82, 0x6e, 0x09, 0x10, 0xac, 0xb0, 0xe4, - 0x4e, 0xbd, 0xc3, 0xad, 0x5d, 0xeb, 0xb3, 0x63, 0xde, 0xa9, 0x00, 0xc1, 0x0a, 0x0b, 0xd9, 0xb0, - 0xe8, 0xf9, 0xdc, 0x3a, 0x3f, 0x96, 0xe7, 0x59, 0x3e, 0x38, 0xb7, 0x7d, 0x8e, 0x05, 0x06, 0xfa, - 0x35, 0x80, 0x30, 0x48, 0x52, 0xf4, 0xc2, 0x48, 0xc6, 0xbd, 0x1c, 0x64, 0x23, 0xc9, 0xed, 0x1b, - 0x1e, 0x0f, 0x0f, 0x93, 0xd1, 0x23, 0x75, 0x07, 0x52, 0x2c, 0xd0, 0x1f, 0x00, 0x3c, 0x93, 0x9e, - 0xa8, 0x0c, 0xbd, 0xaa, 0x8c, 0xc8, 0x9d, 0x51, 0xa7, 0x79, 0xd3, 0xf7, 0x9d, 0xa6, 0x35, 0xe8, - 0xd7, 0xce, 0xac, 0x1d, 0x83, 0x8a, 0x8f, 0xe5, 0x82, 0xfe, 0x0c, 0xe0, 0x82, 0xae, 0xa2, 0x29, - 0x86, 0x35, 0x19, 0x40, 0x3a, 0xea, 0x00, 0xe6, 0x71, 0x54, 0x1c, 0xcf, 0xe9, 0x38, 0x2e, 0x0c, - 0xe9, 0xf1, 0x30, 0x35, 0xf4, 0x37, 0x00, 0x67, 0x3a, 0x34, 0xa0, 0x5e, 0x87, 0x7a, 0x6d, 0xc1, - 0x75, 0x79, 0x24, 0x93, 0x66, 0x9e, 0xeb, 0x46, 0x0a, 0x42, 0xd1, 0x6c, 0x68, 0x9a, 0x33, 0x69, - 0xd5, 0x51, 0xbf, 0x76, 0x36, 0x59, 0x9a, 0xd6, 0xe0, 0x0c, 0x4b, 0xf4, 0x01, 0x80, 0x73, 0xc9, - 0x01, 0xa8, 0x27, 0xe5, 0xe2, 0x18, 0xf3, 0x40, 0xb6, 0xaf, 0x6b, 0x59, 0x40, 0x9c, 0x67, 0x80, - 0xfe, 0x02, 0x44, 
0xa7, 0x16, 0xcf, 0x8d, 0xcc, 0xaa, 0xcb, 0x58, 0xbe, 0x33, 0xf2, 0x58, 0x1a, - 0x04, 0x15, 0xca, 0x2b, 0x49, 0x2b, 0x68, 0x34, 0x47, 0xfd, 0xda, 0x62, 0x3a, 0x92, 0x46, 0x81, - 0xd3, 0x0c, 0xd1, 0x4f, 0x01, 0x9c, 0xa1, 0x49, 0xc7, 0xcd, 0xac, 0x4b, 0x23, 0x09, 0xe2, 0xb1, - 0x4d, 0xbc, 0xfa, 0x8d, 0x29, 0xa5, 0x62, 0x38, 0x83, 0x2d, 0x3a, 0x48, 0x7a, 0x40, 0xdc, 0xc0, - 0xa1, 0xd6, 0xe7, 0x46, 0xdc, 0x41, 0xde, 0x50, 0x7e, 0x71, 0x0c, 0x80, 0xae, 0xc0, 0xb2, 0x17, - 0x39, 0x0e, 0xd9, 0x71, 0xa8, 0x75, 0x59, 0xf6, 0x22, 0xe6, 0xe7, 0xa6, 0xdb, 0x5a, 0x8e, 0x8d, - 0xc5, 0x92, 0x98, 0x93, 0x72, 0xf7, 0x0c, 0xcd, 0xc3, 0xe2, 0x3e, 0x3d, 0x54, 0x63, 0x00, 0x16, - 0x1f, 0x51, 0x07, 0x96, 0x7a, 0xc4, 0x89, 0xe2, 0x51, 0x6f, 0xc4, 0x35, 0x1a, 0x2b, 0xe7, 0xaf, - 0x15, 0x5e, 0x05, 0x4b, 0x8f, 0x00, 0x3c, 0x7b, 0xfc, 0xf5, 0x7f, 0xae, 0xb4, 0x7e, 0x07, 0xe0, - 0xc2, 0xd0, 0x4d, 0x3f, 0x86, 0xd1, 0x83, 0x2c, 0xa3, 0x37, 0x46, 0x7d, 0x65, 0x5b, 0x3c, 0xb4, - 0xbd, 0xae, 0xec, 0x53, 0xd2, 0xf4, 0x7e, 0x01, 0xe0, 0x7c, 0xfe, 0xf2, 0x3c, 0xcf, 0x78, 0xd5, - 0x1f, 0x15, 0xe0, 0xd9, 0xe3, 0xdb, 0x2b, 0x14, 0x9a, 0x39, 0x72, 0x3c, 0xf3, 0x38, 0x4c, 0x66, - 0x52, 0x33, 0x82, 0xbe, 0x07, 0x60, 0xe5, 0xbe, 0xb1, 0x8b, 0xff, 0xd7, 0x19, 0xf9, 0x2f, 0x01, - 0x71, 0xb5, 0x4a, 0x14, 0x0c, 0xa7, 0x71, 0xeb, 0x7f, 0x05, 0x70, 0xf1, 0xd8, 0x32, 0x2c, 0x06, - 0x56, 0xe2, 0x38, 0xfe, 0x43, 0xf5, 0x83, 0x4e, 0xea, 0x07, 0xb6, 0x35, 0x29, 0xc5, 0x5a, 0x9b, - 0x8a, 0x5e, 0xe1, 0xd3, 0x8a, 0x5e, 0xfd, 0x1f, 0x00, 0x9e, 0xff, 0xb8, 0x4c, 0x7c, 0x2e, 0x47, - 0xba, 0x02, 0xcb, 0xba, 0x85, 0x3a, 0x94, 0xc7, 0xa9, 0xa7, 0x06, 0x5d, 0x34, 0xe4, 0x7f, 0xed, - 0xea, 0x53, 0xf3, 0xea, 0xe3, 0x27, 0xd5, 0x53, 0x1f, 0x3e, 0xa9, 0x9e, 0xfa, 0xe8, 0x49, 0xf5, - 0xd4, 0x0f, 0x06, 0x55, 0xf0, 0x78, 0x50, 0x05, 0x1f, 0x0e, 0xaa, 0xe0, 0xa3, 0x41, 0x15, 0xfc, - 0x6b, 0x50, 0x05, 0xbf, 0xfc, 0x77, 0xf5, 0xd4, 0x77, 0xa6, 0x34, 0xf8, 0xff, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x55, 0xc0, 0xd8, 0xd9, 0x04, 0x22, 0x00, 0x00, + // 2762 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, + 0x15, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb2, 0xa4, 0x76, 0xe4, 0x8c, 0x85, 0xbd, 0x2b, 0xad, + 0x71, 0x4a, 0x04, 0x7b, 0x15, 0x9b, 0x84, 0x84, 0x54, 0x71, 0xd0, 0x4a, 0x4a, 0x4a, 0xc6, 0xfa, + 0xa0, 0xd7, 0x76, 0x80, 0x7c, 0xb6, 0x66, 0x7b, 0x57, 0x63, 0xcd, 0x97, 0xa7, 0x67, 0x56, 0x52, + 0x05, 0x28, 0x48, 0x2a, 0x05, 0x45, 0x01, 0xa1, 0x88, 0x2f, 0x14, 0x70, 0x00, 0x8a, 0x0b, 0x07, + 0x38, 0xc0, 0x0d, 0xfe, 0x00, 0x1f, 0x53, 0x9c, 0x52, 0x1c, 0xb6, 0xf0, 0xe6, 0x5f, 0xa0, 0x8a, + 0x2a, 0x9d, 0xa8, 0xfe, 0x98, 0x9e, 0xd9, 0xd9, 0x5d, 0x5b, 0x15, 0xef, 0xc6, 0xdc, 0x34, 0xef, + 0xbd, 0x7e, 0xbf, 0xd7, 0xaf, 0xdf, 0x7b, 0xfd, 0xfa, 0xad, 0x40, 0x7d, 0xff, 0x25, 0x5a, 0x36, + 0xdd, 0xe5, 0xfd, 0x70, 0x97, 0xf8, 0x0e, 0x09, 0x08, 0x5d, 0x6e, 0x12, 0xa7, 0xe6, 0xfa, 0xcb, + 0x92, 0x81, 0x3d, 0x93, 0x1c, 0x06, 0xc4, 0xa1, 0xa6, 0xeb, 0xd0, 0x2b, 0xd8, 0x33, 0x29, 0xf1, + 0x9b, 0xc4, 0x5f, 0xf6, 0xf6, 0x1b, 0x8c, 0x47, 0x3b, 0x05, 0x96, 0x9b, 0x57, 0x77, 0x49, 0x80, + 0xaf, 0x2e, 0x37, 0x88, 0x43, 0x7c, 0x1c, 0x90, 0x5a, 0xd9, 0xf3, 0xdd, 0xc0, 0x85, 0x5f, 0x17, + 0xea, 0xca, 0x1d, 0xd2, 0x6f, 0x2b, 0x75, 0x65, 0x6f, 0xbf, 0xc1, 0x78, 0xb4, 0x53, 0xa0, 0x2c, + 0xd5, 0xcd, 0x5f, 0x69, 0x98, 0xc1, 0x5e, 0xb8, 0x5b, 0x36, 0x5c, 0x7b, 0xb9, 0xe1, 0x36, 0xdc, + 0x65, 0xae, 0x75, 0x37, 0xac, 0xf3, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0xf3, 0xcf, 0xc7, 0xc6, + 0xdb, 0xd8, 0xd8, 0x33, 0x1d, 
0xe2, 0x1f, 0xc5, 0x16, 0xdb, 0x24, 0xc0, 0xcb, 0xcd, 0x2e, 0x1b, + 0xe7, 0x97, 0xfb, 0xad, 0xf2, 0x43, 0x27, 0x30, 0x6d, 0xd2, 0xb5, 0xe0, 0xab, 0x8f, 0x5a, 0x40, + 0x8d, 0x3d, 0x62, 0xe3, 0xf4, 0xba, 0xd2, 0xb1, 0x06, 0x66, 0x57, 0x5d, 0xa7, 0x49, 0x7c, 0xb6, + 0x4b, 0x44, 0xee, 0x86, 0x84, 0x06, 0xb0, 0x02, 0xb2, 0xa1, 0x59, 0xd3, 0xb5, 0x05, 0x6d, 0x69, + 0xbc, 0xf2, 0xdc, 0xfd, 0x56, 0xf1, 0x54, 0xbb, 0x55, 0xcc, 0xde, 0xda, 0x58, 0x3b, 0x6e, 0x15, + 0x17, 0xfb, 0x21, 0x05, 0x47, 0x1e, 0xa1, 0xe5, 0x5b, 0x1b, 0x6b, 0x88, 0x2d, 0x86, 0xaf, 0x82, + 0xd9, 0x1a, 0xa1, 0xa6, 0x4f, 0x6a, 0x2b, 0x3b, 0x1b, 0xb7, 0x85, 0x7e, 0x3d, 0xc3, 0x35, 0x9e, + 0x93, 0x1a, 0x67, 0xd7, 0xd2, 0x02, 0xa8, 0x7b, 0x0d, 0xfc, 0x16, 0x18, 0x73, 0x77, 0xef, 0x10, + 0x23, 0xa0, 0x7a, 0x76, 0x21, 0xbb, 0x34, 0x71, 0xed, 0x4a, 0x39, 0x3e, 0x41, 0x65, 0x02, 0x3f, + 0x36, 0xb9, 0xd9, 0x32, 0xc2, 0x07, 0xeb, 0xd1, 0xc9, 0x55, 0xa6, 0x25, 0xda, 0xd8, 0xb6, 0xd0, + 0x82, 0x22, 0x75, 0xa5, 0x3f, 0x64, 0x00, 0x4c, 0x6e, 0x9e, 0x7a, 0xae, 0x43, 0xc9, 0x40, 0x76, + 0x4f, 0xc1, 0x8c, 0xc1, 0x35, 0x07, 0xa4, 0x26, 0x71, 0xf5, 0xcc, 0x67, 0xb1, 0x5e, 0x97, 0xf8, + 0x33, 0xab, 0x29, 0x75, 0xa8, 0x0b, 0x00, 0xde, 0x04, 0xa3, 0x3e, 0xa1, 0xa1, 0x15, 0xe8, 0xd9, + 0x05, 0x6d, 0x69, 0xe2, 0xda, 0xe5, 0xbe, 0x50, 0x3c, 0xbe, 0x59, 0xf0, 0x95, 0x9b, 0x57, 0xcb, + 0xd5, 0x00, 0x07, 0x21, 0xad, 0x9c, 0x96, 0x48, 0xa3, 0x88, 0xeb, 0x40, 0x52, 0x57, 0xe9, 0xc7, + 0x19, 0x30, 0x93, 0xf4, 0x52, 0xd3, 0x24, 0x07, 0xf0, 0x00, 0x8c, 0xf9, 0x22, 0x58, 0xb8, 0x9f, + 0x26, 0xae, 0xed, 0x94, 0x1f, 0x2b, 0xad, 0xca, 0x5d, 0x41, 0x58, 0x99, 0x60, 0x67, 0x26, 0x3f, + 0x50, 0x84, 0x06, 0xdf, 0x05, 0x79, 0x5f, 0x1e, 0x14, 0x8f, 0xa6, 0x89, 0x6b, 0xdf, 0x1c, 0x20, + 0xb2, 0x50, 0x5c, 0x99, 0x6c, 0xb7, 0x8a, 0xf9, 0xe8, 0x0b, 0x29, 0xc0, 0xd2, 0x47, 0x19, 0x50, + 0x58, 0x0d, 0x69, 0xe0, 0xda, 0x88, 0x50, 0x37, 0xf4, 0x0d, 0xb2, 0xea, 0x5a, 0xa1, 0xed, 0xac, + 0x91, 0xba, 0xe9, 0x98, 0x01, 0x8b, 0xd6, 0x05, 0x30, 0xe2, 0x60, 0x9b, 0xc8, 0xe8, 0x99, 0x94, + 0x3e, 0x1d, 0xd9, 0xc2, 0x36, 0x41, 0x9c, 0xc3, 0x24, 0x58, 0xb0, 0xc8, 0x5c, 0x50, 0x12, 0x37, + 0x8f, 0x3c, 0x82, 0x38, 0x07, 0x3e, 0x03, 0x46, 0xeb, 0xae, 0x6f, 0x63, 0x71, 0x8e, 0xe3, 0xf1, + 0xc9, 0xbc, 0xc2, 0xa9, 0x48, 0x72, 0xe1, 0x0b, 0x60, 0xa2, 0x46, 0xa8, 0xe1, 0x9b, 0x1e, 0x83, + 0xd6, 0x47, 0xb8, 0xf0, 0x19, 0x29, 0x3c, 0xb1, 0x16, 0xb3, 0x50, 0x52, 0x0e, 0x5e, 0x06, 0x79, + 0xcf, 0x37, 0x5d, 0xdf, 0x0c, 0x8e, 0xf4, 0xdc, 0x82, 0xb6, 0x94, 0xab, 0xcc, 0xc8, 0x35, 0xf9, + 0x1d, 0x49, 0x47, 0x4a, 0x02, 0x2e, 0x80, 0xfc, 0xf5, 0xea, 0xf6, 0xd6, 0x0e, 0x0e, 0xf6, 0xf4, + 0x51, 0x8e, 0x30, 0xc2, 0xa4, 0x51, 0xfe, 0x8e, 0xa4, 0x96, 0xde, 0xcb, 0x00, 0x3d, 0xed, 0x95, + 0xc8, 0xa5, 0xf0, 0x15, 0x90, 0xa7, 0x01, 0xab, 0x38, 0x8d, 0x23, 0xe9, 0x93, 0x67, 0x23, 0xb0, + 0xaa, 0xa4, 0x1f, 0xb7, 0x8a, 0x67, 0xe3, 0x15, 0x11, 0x95, 0xfb, 0x43, 0xad, 0x85, 0xbf, 0xd5, + 0xc0, 0x99, 0x03, 0xb2, 0xbb, 0xe7, 0xba, 0xfb, 0xab, 0x96, 0x49, 0x9c, 0x60, 0xd5, 0x75, 0xea, + 0x66, 0x43, 0xc6, 0x00, 0x7a, 0xcc, 0x18, 0x78, 0xad, 0x5b, 0x73, 0xe5, 0xe9, 0x76, 0xab, 0x78, + 0xa6, 0x07, 0x03, 0xf5, 0xb2, 0xa3, 0xf4, 0x7e, 0x36, 0xed, 0x84, 0x44, 0x50, 0xbc, 0x03, 0xf2, + 0x2c, 0xd9, 0x6a, 0x38, 0xc0, 0x32, 0x5d, 0x9e, 0x3b, 0x59, 0x6a, 0x8a, 0xcc, 0xde, 0x24, 0x01, + 0xae, 0x40, 0xe9, 0x36, 0x10, 0xd3, 0x90, 0xd2, 0x0a, 0xbf, 0x07, 0x46, 0xa8, 0x47, 0x0c, 0xe9, + 0x8e, 0xd7, 0x1f, 0x37, 0x25, 0xfa, 0x6c, 0xa4, 0xea, 0x11, 0x23, 0x8e, 0x58, 0xf6, 0x85, 0x38, + 0x2c, 0xfc, 0x40, 0x03, 0xa3, 0x94, 0x97, 0x11, 0x59, 
0x7a, 0xde, 0x1c, 0x96, 0x05, 0xa9, 0x5a, + 0x25, 0xbe, 0x91, 0x04, 0x2f, 0xfd, 0x27, 0x03, 0x16, 0xfb, 0x2d, 0x5d, 0x75, 0x9d, 0x9a, 0x38, + 0x8e, 0x0d, 0x99, 0x81, 0x22, 0x1e, 0x5f, 0x48, 0x66, 0xe0, 0x71, 0xab, 0x78, 0xe9, 0x91, 0x0a, + 0x12, 0xa9, 0xfa, 0x35, 0xb5, 0x6f, 0x91, 0xce, 0x8b, 0x9d, 0x86, 0x1d, 0xb7, 0x8a, 0xd3, 0x6a, + 0x59, 0xa7, 0xad, 0xb0, 0x09, 0xa0, 0x85, 0x69, 0x70, 0xd3, 0xc7, 0x0e, 0x15, 0x6a, 0x4d, 0x9b, + 0x48, 0xf7, 0x3d, 0x7b, 0xb2, 0xf0, 0x60, 0x2b, 0x2a, 0xf3, 0x12, 0x12, 0xde, 0xe8, 0xd2, 0x86, + 0x7a, 0x20, 0xb0, 0xea, 0xe2, 0x13, 0x4c, 0x55, 0xc1, 0x48, 0xd4, 0x7d, 0x46, 0x45, 0x92, 0x0b, + 0xbf, 0x04, 0xc6, 0x6c, 0x42, 0x29, 0x6e, 0x10, 0x5e, 0x25, 0xc6, 0xe3, 0x8b, 0x74, 0x53, 0x90, + 0x51, 0xc4, 0x67, 0x5d, 0xc4, 0xf9, 0x7e, 0x5e, 0xbb, 0x61, 0xd2, 0x00, 0xbe, 0xd1, 0x95, 0x00, + 0xe5, 0x93, 0xed, 0x90, 0xad, 0xe6, 0xe1, 0xaf, 0x4a, 0x54, 0x44, 0x49, 0x04, 0xff, 0x77, 0x41, + 0xce, 0x0c, 0x88, 0x1d, 0xdd, 0xb0, 0xaf, 0x0d, 0x29, 0xf6, 0x2a, 0x53, 0xd2, 0x86, 0xdc, 0x06, + 0x43, 0x43, 0x02, 0xb4, 0xf4, 0xc7, 0x0c, 0xb8, 0xd0, 0x6f, 0x09, 0x2b, 0xfb, 0x94, 0x79, 0xdc, + 0xb3, 0x42, 0x1f, 0x5b, 0x32, 0xe2, 0x94, 0xc7, 0x77, 0x38, 0x15, 0x49, 0x2e, 0x2b, 0xcc, 0xd4, + 0x74, 0x1a, 0xa1, 0x85, 0x7d, 0x19, 0x4e, 0x6a, 0xd7, 0x55, 0x49, 0x47, 0x4a, 0x02, 0x96, 0x01, + 0xa0, 0x7b, 0xae, 0x1f, 0x70, 0x0c, 0xde, 0x1a, 0x8d, 0x57, 0x4e, 0xb3, 0x02, 0x51, 0x55, 0x54, + 0x94, 0x90, 0x60, 0xf7, 0xce, 0xbe, 0xe9, 0xd4, 0xe4, 0xa9, 0xab, 0x2c, 0xfe, 0x86, 0xe9, 0xd4, + 0x10, 0xe7, 0x30, 0x7c, 0xcb, 0xa4, 0x01, 0xa3, 0xc8, 0x23, 0xef, 0xf0, 0x3a, 0x97, 0x54, 0x12, + 0x0c, 0xdf, 0x60, 0xb5, 0xd9, 0xf5, 0x4d, 0x42, 0xf5, 0xd1, 0x18, 0x7f, 0x55, 0x51, 0x51, 0x42, + 0xa2, 0xf4, 0xeb, 0x7c, 0xff, 0x20, 0x61, 0xa5, 0x04, 0x5e, 0x04, 0xb9, 0x86, 0xef, 0x86, 0x9e, + 0xf4, 0x92, 0xf2, 0xf6, 0xab, 0x8c, 0x88, 0x04, 0x8f, 0x45, 0x65, 0xb3, 0xa3, 0x99, 0x54, 0x51, + 0x19, 0xb5, 0x90, 0x11, 0x1f, 0xfe, 0x50, 0x03, 0x39, 0x47, 0x3a, 0x87, 0x85, 0xdc, 0x1b, 0x43, + 0x8a, 0x0b, 0xee, 0xde, 0xd8, 0x5c, 0xe1, 0x79, 0x81, 0x0c, 0x9f, 0x07, 0x39, 0x6a, 0xb8, 0x1e, + 0x91, 0x5e, 0x2f, 0x44, 0x42, 0x55, 0x46, 0x3c, 0x6e, 0x15, 0xa7, 0x22, 0x75, 0x9c, 0x80, 0x84, + 0x30, 0xfc, 0x91, 0x06, 0x40, 0x13, 0x5b, 0x66, 0x0d, 0xf3, 0x8b, 0x3d, 0xc7, 0xcd, 0x1f, 0x6c, + 0x58, 0xdf, 0x56, 0xea, 0xc5, 0xa1, 0xc5, 0xdf, 0x28, 0x01, 0x0d, 0x3f, 0xd4, 0xc0, 0x24, 0x0d, + 0x77, 0x7d, 0xb9, 0x8a, 0xf2, 0x16, 0x60, 0xe2, 0xda, 0xb7, 0x07, 0x6a, 0x4b, 0x35, 0x01, 0x50, + 0x99, 0x69, 0xb7, 0x8a, 0x93, 0x49, 0x0a, 0xea, 0x30, 0x00, 0xfe, 0x54, 0x03, 0x79, 0x79, 0xc2, + 0x54, 0x1f, 0xe3, 0x09, 0xff, 0xd6, 0x90, 0x0e, 0x56, 0x46, 0x54, 0x9c, 0x05, 0x92, 0x40, 0x91, + 0xb2, 0x00, 0xfe, 0x5d, 0x03, 0x3a, 0xae, 0x89, 0x02, 0x8f, 0xad, 0x1d, 0xdf, 0x74, 0x02, 0xe2, + 0x8b, 0xae, 0x90, 0xea, 0x79, 0x6e, 0xde, 0x60, 0xef, 0xc2, 0x74, 0xc7, 0x59, 0x59, 0x90, 0xd6, + 0xe9, 0x2b, 0x7d, 0xcc, 0x40, 0x7d, 0x0d, 0xe4, 0x81, 0x66, 0xa8, 0xd6, 0x4b, 0x1f, 0x1f, 0x42, + 0xa0, 0xc5, 0x9d, 0x9d, 0xac, 0x0e, 0x71, 0xbb, 0x9d, 0x80, 0x2e, 0x7d, 0x98, 0x4d, 0xb7, 0xd6, + 0xe9, 0x4b, 0x1f, 0xde, 0x13, 0xc6, 0x8a, 0xad, 0x50, 0x5d, 0xe3, 0xce, 0x7d, 0x67, 0x48, 0x67, + 0xaf, 0x6e, 0xed, 0xb8, 0xf1, 0x52, 0x24, 0x8a, 0x12, 0x76, 0xc0, 0x5f, 0x69, 0x60, 0x0a, 0x1b, + 0x06, 0xf1, 0x02, 0x52, 0x13, 0xb5, 0x38, 0xf3, 0x39, 0x94, 0x9b, 0x39, 0x69, 0xd5, 0xd4, 0x4a, + 0x12, 0x1a, 0x75, 0x5a, 0x02, 0x5f, 0x06, 0xa7, 0x69, 0xe0, 0xfa, 0xa4, 0x16, 0x45, 0xae, 0xbc, + 0x27, 0x60, 0xbb, 0x55, 0x3c, 0x5d, 0xed, 0xe0, 0xa0, 0x94, 0x64, 0xe9, 0xd3, 
0x11, 0x50, 0x7c, + 0x44, 0x66, 0x9c, 0xe0, 0xb5, 0xf3, 0x0c, 0x18, 0xe5, 0xdb, 0xad, 0x71, 0xaf, 0xe4, 0x13, 0x9d, + 0x1b, 0xa7, 0x22, 0xc9, 0x65, 0x75, 0x9d, 0xe1, 0xb3, 0x6e, 0x23, 0xcb, 0x05, 0x55, 0x5d, 0xaf, + 0x0a, 0x32, 0x8a, 0xf8, 0xf0, 0x5d, 0x30, 0x2a, 0xa6, 0x19, 0xbc, 0xa8, 0x0e, 0xb1, 0x30, 0x02, + 0x6e, 0x27, 0x87, 0x42, 0x12, 0xb2, 0xbb, 0x20, 0xe6, 0x9e, 0x74, 0x41, 0x7c, 0x68, 0x05, 0x1a, + 0xfd, 0x3f, 0xaf, 0x40, 0xa5, 0xff, 0x6a, 0xe9, 0xbc, 0x4f, 0x6c, 0xb5, 0x6a, 0x60, 0x8b, 0xc0, + 0x35, 0x30, 0xc3, 0x1e, 0x19, 0x88, 0x78, 0x96, 0x69, 0x60, 0xca, 0x5f, 0xa2, 0x22, 0xe0, 0xd4, + 0x70, 0xa4, 0x9a, 0xe2, 0xa3, 0xae, 0x15, 0xf0, 0x3a, 0x80, 0xa2, 0xf1, 0xee, 0xd0, 0x23, 0x7a, + 0x08, 0xd5, 0x42, 0x57, 0xbb, 0x24, 0x50, 0x8f, 0x55, 0x70, 0x15, 0xcc, 0x5a, 0x78, 0x97, 0x58, + 0x55, 0x62, 0x11, 0x23, 0x70, 0x7d, 0xae, 0x4a, 0xbc, 0xd5, 0xe7, 0xda, 0xad, 0xe2, 0xec, 0x8d, + 0x34, 0x13, 0x75, 0xcb, 0x97, 0x16, 0xd3, 0xe9, 0x95, 0xdc, 0xb8, 0x78, 0xce, 0xfc, 0x2e, 0x03, + 0xe6, 0xfb, 0x47, 0x06, 0x7c, 0x2f, 0x7e, 0x75, 0x89, 0xa6, 0xfa, 0xad, 0x61, 0x45, 0xa1, 0x7c, + 0x76, 0x81, 0xee, 0x27, 0x17, 0xfc, 0x3e, 0xeb, 0x70, 0xb0, 0x15, 0x4d, 0x63, 0xde, 0x1c, 0x9a, + 0x09, 0x0c, 0xa4, 0x32, 0x2e, 0x9a, 0x27, 0x6c, 0xf1, 0x5e, 0x09, 0x5b, 0xa4, 0xf4, 0x27, 0x2d, + 0xfd, 0xf0, 0x8e, 0x33, 0x18, 0xfe, 0x4c, 0x03, 0xd3, 0xae, 0x47, 0x9c, 0x95, 0x9d, 0x8d, 0xdb, + 0x5f, 0x11, 0x99, 0x2c, 0x5d, 0xb5, 0xf5, 0x98, 0x76, 0x5e, 0xaf, 0x6e, 0x6f, 0x09, 0x85, 0x3b, + 0xbe, 0xeb, 0xd1, 0xca, 0x99, 0x76, 0xab, 0x38, 0xbd, 0xdd, 0x09, 0x85, 0xd2, 0xd8, 0x25, 0x1b, + 0xcc, 0xad, 0x1f, 0x06, 0xc4, 0x77, 0xb0, 0xb5, 0xe6, 0x1a, 0xa1, 0x4d, 0x9c, 0x40, 0x18, 0x9a, + 0x1a, 0xe5, 0x68, 0x27, 0x1c, 0xe5, 0x5c, 0x00, 0xd9, 0xd0, 0xb7, 0x64, 0x14, 0x4f, 0xa8, 0x51, + 0x25, 0xba, 0x81, 0x18, 0xbd, 0xb4, 0x08, 0x46, 0x98, 0x9d, 0xf0, 0x1c, 0xc8, 0xfa, 0xf8, 0x80, + 0x6b, 0x9d, 0xac, 0x8c, 0x31, 0x11, 0x84, 0x0f, 0x10, 0xa3, 0x95, 0xfe, 0x75, 0x1e, 0x4c, 0xa7, + 0xf6, 0x02, 0xe7, 0x41, 0x46, 0xcd, 0x3f, 0x81, 0x54, 0x9a, 0xd9, 0x58, 0x43, 0x19, 0xb3, 0x06, + 0x5f, 0x54, 0xc5, 0x57, 0x80, 0x16, 0x55, 0x3d, 0xe7, 0x54, 0xd6, 0xd2, 0xc6, 0xea, 0x98, 0x21, + 0x51, 0xe1, 0x64, 0x36, 0x90, 0xba, 0xcc, 0x12, 0x61, 0x03, 0xa9, 0x23, 0x46, 0xfb, 0xac, 0x73, + 0xac, 0x68, 0x90, 0x96, 0x3b, 0xc1, 0x20, 0x6d, 0xf4, 0xa1, 0x83, 0xb4, 0x8b, 0x20, 0x17, 0x98, + 0x81, 0x45, 0xf4, 0xb1, 0xce, 0x97, 0xc7, 0x4d, 0x46, 0x44, 0x82, 0x07, 0xef, 0x80, 0xb1, 0x1a, + 0xa9, 0xe3, 0xd0, 0x0a, 0xf4, 0x3c, 0x0f, 0xa1, 0xd5, 0x01, 0x84, 0x90, 0x98, 0x72, 0xae, 0x09, + 0xbd, 0x28, 0x02, 0x80, 0x97, 0xc0, 0x98, 0x8d, 0x0f, 0x4d, 0x3b, 0xb4, 0x79, 0x4f, 0xa6, 0x09, + 0xb1, 0x4d, 0x41, 0x42, 0x11, 0x8f, 0x55, 0x46, 0x72, 0x68, 0x58, 0x21, 0x35, 0x9b, 0x44, 0x32, + 0x75, 0xc0, 0x6f, 0x4f, 0x55, 0x19, 0xd7, 0x53, 0x7c, 0xd4, 0xb5, 0x82, 0x83, 0x99, 0x0e, 0x5f, + 0x3c, 0x91, 0x00, 0x13, 0x24, 0x14, 0xf1, 0x3a, 0xc1, 0xa4, 0xfc, 0x64, 0x3f, 0x30, 0xb9, 0xb8, + 0x6b, 0x05, 0xfc, 0x32, 0x18, 0xb7, 0xf1, 0xe1, 0x0d, 0xe2, 0x34, 0x82, 0x3d, 0x7d, 0x6a, 0x41, + 0x5b, 0xca, 0x56, 0xa6, 0xda, 0xad, 0xe2, 0xf8, 0x66, 0x44, 0x44, 0x31, 0x9f, 0x0b, 0x9b, 0x8e, + 0x14, 0x3e, 0x9d, 0x10, 0x8e, 0x88, 0x28, 0xe6, 0xb3, 0x0e, 0xc2, 0xc3, 0x01, 0x4b, 0x2e, 0x7d, + 0xba, 0xf3, 0x65, 0xb8, 0x23, 0xc8, 0x28, 0xe2, 0xc3, 0x25, 0x90, 0xb7, 0xf1, 0x21, 0x7f, 0xc5, + 0xeb, 0x33, 0x5c, 0x2d, 0x9f, 0xf8, 0x6e, 0x4a, 0x1a, 0x52, 0x5c, 0x2e, 0x69, 0x3a, 0x42, 0x72, + 0x36, 0x21, 0x29, 0x69, 0x48, 0x71, 0x59, 0x10, 0x87, 0x8e, 0x79, 0x37, 0x24, 0x42, 0x18, 0x72, + 0xcf, 
0xa8, 0x20, 0xbe, 0x15, 0xb3, 0x50, 0x52, 0x8e, 0xbd, 0xa2, 0xed, 0xd0, 0x0a, 0x4c, 0xcf, + 0x22, 0xdb, 0x75, 0xfd, 0x0c, 0xf7, 0x3f, 0xef, 0x93, 0x37, 0x15, 0x15, 0x25, 0x24, 0x20, 0x01, + 0x23, 0xc4, 0x09, 0x6d, 0xfd, 0x29, 0x7e, 0xb1, 0x0f, 0x24, 0x04, 0x55, 0xe6, 0xac, 0x3b, 0xa1, + 0x8d, 0xb8, 0x7a, 0xf8, 0x22, 0x98, 0xb2, 0xf1, 0x21, 0x2b, 0x07, 0xc4, 0x0f, 0xd8, 0xfb, 0x7e, + 0x8e, 0x6f, 0x7e, 0x96, 0x75, 0x9c, 0x9b, 0x49, 0x06, 0xea, 0x94, 0xe3, 0x0b, 0x4d, 0x27, 0xb1, + 0xf0, 0x6c, 0x62, 0x61, 0x92, 0x81, 0x3a, 0xe5, 0x98, 0xa7, 0x7d, 0x72, 0x37, 0x34, 0x7d, 0x52, + 0xd3, 0x9f, 0xe6, 0x4d, 0xaa, 0x9c, 0xc2, 0x0b, 0x1a, 0x52, 0x5c, 0xd8, 0x8c, 0xc6, 0x3d, 0x3a, + 0x4f, 0xc3, 0x5b, 0x83, 0xad, 0xe4, 0xdb, 0xfe, 0x8a, 0xef, 0xe3, 0x23, 0x71, 0xd3, 0x24, 0x07, + 0x3d, 0x90, 0x82, 0x1c, 0xb6, 0xac, 0xed, 0xba, 0x7e, 0x8e, 0xfb, 0x7e, 0xd0, 0x37, 0x88, 0xaa, + 0x3a, 0x2b, 0x0c, 0x04, 0x09, 0x2c, 0x06, 0xea, 0x3a, 0x2c, 0x34, 0xe6, 0x87, 0x0b, 0xba, 0xcd, + 0x40, 0x90, 0xc0, 0xe2, 0x3b, 0x75, 0x8e, 0xb6, 0xeb, 0xfa, 0x17, 0x86, 0xbc, 0x53, 0x06, 0x82, + 0x04, 0x16, 0x34, 0x41, 0xd6, 0x71, 0x03, 0xfd, 0xfc, 0x50, 0xae, 0x67, 0x7e, 0xe1, 0x6c, 0xb9, + 0x01, 0x62, 0x18, 0xf0, 0x97, 0x1a, 0x00, 0x5e, 0x1c, 0xa2, 0x17, 0x06, 0x32, 0x45, 0x48, 0x41, + 0x96, 0xe3, 0xd8, 0x5e, 0x77, 0x02, 0xff, 0x28, 0x7e, 0x47, 0x26, 0x72, 0x20, 0x61, 0x05, 0xfc, + 0xbd, 0x06, 0x9e, 0x4a, 0xb6, 0xc9, 0xca, 0xbc, 0x02, 0xf7, 0xc8, 0xcd, 0x41, 0x87, 0x79, 0xc5, + 0x75, 0xad, 0x8a, 0xde, 0x6e, 0x15, 0x9f, 0x5a, 0xe9, 0x81, 0x8a, 0x7a, 0xda, 0x02, 0xff, 0xac, + 0x81, 0x59, 0x59, 0x45, 0x13, 0x16, 0x16, 0xb9, 0x03, 0xc9, 0xa0, 0x1d, 0x98, 0xc6, 0x11, 0x7e, + 0x54, 0xbf, 0x1e, 0x77, 0xf1, 0x51, 0xb7, 0x69, 0xf0, 0x6f, 0x1a, 0x98, 0xac, 0x11, 0x8f, 0x38, + 0x35, 0xe2, 0x18, 0xcc, 0xd6, 0x85, 0x81, 0x8c, 0x0d, 0xd2, 0xb6, 0xae, 0x25, 0x20, 0x84, 0x99, + 0x65, 0x69, 0xe6, 0x64, 0x92, 0x75, 0xdc, 0x2a, 0x9e, 0x8d, 0x97, 0x26, 0x39, 0xa8, 0xc3, 0x4a, + 0xf8, 0x91, 0x06, 0xa6, 0xe3, 0x03, 0x10, 0x57, 0xca, 0xe2, 0x10, 0xe3, 0x80, 0xb7, 0xaf, 0x2b, + 0x9d, 0x80, 0x28, 0x6d, 0x01, 0xfc, 0x8b, 0xc6, 0x3a, 0xb5, 0xe8, 0xdd, 0x47, 0xf5, 0x12, 0xf7, + 0xe5, 0xdb, 0x03, 0xf7, 0xa5, 0x42, 0x10, 0xae, 0xbc, 0x1c, 0xb7, 0x82, 0x8a, 0x73, 0xdc, 0x2a, + 0xce, 0x25, 0x3d, 0xa9, 0x18, 0x28, 0x69, 0x21, 0xfc, 0x89, 0x06, 0x26, 0x49, 0xdc, 0x71, 0x53, + 0xfd, 0xe2, 0x40, 0x9c, 0xd8, 0xb3, 0x89, 0x17, 0x2f, 0xf5, 0x04, 0x8b, 0xa2, 0x0e, 0x6c, 0xd6, + 0x41, 0x92, 0x43, 0x6c, 0x7b, 0x16, 0xd1, 0xbf, 0x38, 0xe0, 0x0e, 0x72, 0x5d, 0xe8, 0x45, 0x11, + 0x00, 0xbc, 0x0c, 0xf2, 0x4e, 0x68, 0x59, 0x78, 0xd7, 0x22, 0xfa, 0x25, 0xde, 0x8b, 0xa8, 0x29, + 0xe6, 0x96, 0xa4, 0x23, 0x25, 0x31, 0xcf, 0xde, 0x49, 0xa9, 0x3c, 0x83, 0x33, 0x20, 0xbb, 0x4f, + 0xe4, 0x8f, 0xb6, 0x88, 0xfd, 0x09, 0x6b, 0x20, 0xd7, 0xc4, 0x56, 0x18, 0x3d, 0xf5, 0x06, 0x5c, + 0xa3, 0x91, 0x50, 0xfe, 0x72, 0xe6, 0x25, 0x6d, 0xfe, 0x9e, 0x06, 0xce, 0xf6, 0x4e, 0xff, 0x27, + 0x6a, 0xd6, 0x6f, 0x34, 0x30, 0xdb, 0x95, 0xe9, 0x3d, 0x2c, 0xba, 0xdb, 0x69, 0xd1, 0xeb, 0x83, + 0x4e, 0xd9, 0x6a, 0xe0, 0x9b, 0x4e, 0x83, 0xf7, 0x29, 0x49, 0xf3, 0x7e, 0xae, 0x81, 0x99, 0x74, + 0xf2, 0x3c, 0x49, 0x7f, 0x95, 0xee, 0x65, 0xc0, 0xd9, 0xde, 0xed, 0x15, 0xf4, 0xd5, 0x3b, 0x72, + 0x38, 0xef, 0xf1, 0x5e, 0xb3, 0xbb, 0x0f, 0x34, 0x30, 0x71, 0x47, 0xc9, 0x45, 0x3f, 0x17, 0x0e, + 0x7c, 0x12, 0x10, 0x55, 0xab, 0x98, 0x41, 0x51, 0x12, 0xb7, 0xf4, 0x57, 0x0d, 0xcc, 0xf5, 0x2c, + 0xc3, 0xec, 0xc1, 0x8a, 0x2d, 0xcb, 0x3d, 0x10, 0x03, 0x9d, 0xc4, 0xb4, 0x74, 0x85, 0x53, 0x91, + 0xe4, 0x26, 0xbc, 0x97, 0xf9, 
0xbc, 0xbc, 0x57, 0xfa, 0x87, 0x06, 0xce, 0x3f, 0x2c, 0x12, 0x9f, + 0xc8, 0x91, 0x2e, 0x81, 0xbc, 0x6c, 0xa1, 0x8e, 0xf8, 0x71, 0xca, 0x57, 0x83, 0x2c, 0x1a, 0xfc, + 0xff, 0x58, 0xc4, 0x5f, 0xa5, 0xf7, 0x35, 0x30, 0x53, 0x25, 0x7e, 0xd3, 0x34, 0x08, 0x22, 0x75, + 0xe2, 0x13, 0xc7, 0x20, 0x70, 0x19, 0x8c, 0xf3, 0xdf, 0xe9, 0x3c, 0x6c, 0x44, 0x43, 0xec, 0x59, + 0xe9, 0xf2, 0xf1, 0xad, 0x88, 0x81, 0x62, 0x19, 0x35, 0xf0, 0xce, 0xf4, 0x1d, 0x78, 0x9f, 0x07, + 0x23, 0x5e, 0x3c, 0x0e, 0xcc, 0x33, 0x2e, 0x9f, 0x00, 0x72, 0x6a, 0xe9, 0x9f, 0x1a, 0xe8, 0xf5, + 0x3f, 0x25, 0xb0, 0x09, 0xc6, 0xa8, 0x30, 0x4e, 0x3a, 0x6f, 0xfb, 0x31, 0x9d, 0x97, 0xde, 0xaa, + 0xb8, 0x26, 0x22, 0x6a, 0x04, 0xc6, 0xfc, 0x67, 0xe0, 0x4a, 0xe8, 0xd4, 0xe4, 0x00, 0x6f, 0x52, + 0xf8, 0x6f, 0x75, 0x45, 0xd0, 0x90, 0xe2, 0xc2, 0x73, 0x62, 0xd4, 0x94, 0x98, 0xdf, 0x44, 0x63, + 0xa6, 0xca, 0x95, 0xfb, 0x0f, 0x0a, 0xa7, 0x3e, 0x7e, 0x50, 0x38, 0xf5, 0xc9, 0x83, 0xc2, 0xa9, + 0x1f, 0xb4, 0x0b, 0xda, 0xfd, 0x76, 0x41, 0xfb, 0xb8, 0x5d, 0xd0, 0x3e, 0x69, 0x17, 0xb4, 0x7f, + 0xb7, 0x0b, 0xda, 0x2f, 0x3e, 0x2d, 0x9c, 0xfa, 0xce, 0x98, 0x34, 0xed, 0x7f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x3a, 0x56, 0xbb, 0xc0, 0xe9, 0x29, 0x00, 0x00, } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go index 77f849975f..97bc5431cc 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go @@ -48,6 +48,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CustomResourceDefinition{}, &CustomResourceDefinitionList{}, + &ConversionReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index cab705d927..e99d9e49b5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -18,6 +18,18 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// ConversionStrategyType describes different conversion types. +type ConversionStrategyType string + +const ( + // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. + NoneConverter ConversionStrategyType = "None" + // WebhookConverter is a converter that calls to an external webhook to convert the CR. + WebhookConverter ConversionStrategyType = "Webhook" ) // CustomResourceDefinitionSpec describes how a user wants their resource to appear @@ -35,9 +47,13 @@ type CustomResourceDefinitionSpec struct { // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` // Validation describes the validation methods for CustomResources + // Optional, the global validation schema for all versions. + // Top-level and per-version schemas are mutually exclusive. 
// +optional Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` - // Subresources describes the subresources for CustomResources + // Subresources describes the subresources for CustomResource + // Optional, the global subresources for all versions. + // Top-level and per-version subresources are mutually exclusive. // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` // Versions is the list of all supported versions for this resource. @@ -54,10 +70,93 @@ type CustomResourceDefinitionSpec struct { // +optional Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Optional, the global columns for all versions. + // Top-level and per-version columns are mutually exclusive. // +optional AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` + + // `conversion` defines conversion settings for the CRD. + // +optional + Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` +} + +// CustomResourceConversion describes how to convert different versions of a CR. +type CustomResourceConversion struct { + // `strategy` specifies the conversion strategy. Allowed values are: + // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. + // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information is needed for this option. + Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` + + // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. This field is + // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook. It has the same field as admissionregistration.v1beta1.WebhookClientConfig. +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. 
+ // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // Port 443 will be used if it is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` } +// CustomResourceDefinitionVersion describes a version for CRD. type CustomResourceDefinitionVersion struct { // Name is the version name, e.g. “v1”, “v2beta1”, etc. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -66,6 +165,27 @@ type CustomResourceDefinitionVersion struct { // Storage flags the version as storage version. There must be exactly one // flagged as storage version. Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` + // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. + // Top-level and per-version schemas are mutually exclusive. + // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` + // Subresources describes the subresources for CustomResource + // Top-level and per-version subresources are mutually exclusive. + // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // +optional + Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + // Top-level and per-version columns are mutually exclusive. + // Per-version columns must not all be set to identical values (top-level columns should be used instead) + // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. + // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. 
To apply an + // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must + // be explicitly set to null + // +optional + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` } // CustomResourceColumnDefinition specifies a column for server side printing. @@ -263,3 +383,46 @@ type CustomResourceSubresourceScale struct { // +optional LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConversionReview describes a conversion request/response. +type ConversionReview struct { + metav1.TypeMeta `json:",inline"` + // `request` describes the attributes for the conversion request. + // +optional + Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // `response` describes the attributes for the conversion response. + // +optional + Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// ConversionRequest describes the conversion request parameters. +type ConversionRequest struct { + // `uid` is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // `desiredAPIVersion` is the version to convert given objects to. e.g. "myapi.example.com/v1" + DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` + // `objects` is the list of CR objects to be converted. + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` +} + +// ConversionResponse describes a conversion response. +type ConversionResponse struct { + // `uid` is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` + // `convertedObjects` is the list of converted version of `request.objects` if the `result` is successful otherwise empty. + // The webhook is expected to set apiVersion of these objects to the ConversionRequest.desiredAPIVersion. The list + // must also has the same size as input list with the same objects in the same order(i.e. equal UIDs and object meta) + ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` + // `result` contains the result of conversion with extra details if the conversion failed. `result.status` determines if + // the conversion failed or succeeded. The `result.status` field is required and represent the success or failure of the + // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set + // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` + // will be used to construct an error message for the end user. 
+ Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index 9ac8b9c0bf..86485cef95 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -45,6 +45,16 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) }); err != nil { @@ -215,6 +225,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) 
}); err != nil { @@ -263,6 +293,28 @@ func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResou return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in, out, s) } +func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) + out.WebhookClientConfig = (*apiextensions.WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + return nil +} + +// Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + out.Strategy = ConversionStrategyType(in.Strategy) + out.WebhookClientConfig = (*WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + return nil +} + +// Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in, out, s) +} + func autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { @@ -412,8 +464,19 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes out.Validation = nil } out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.Versions = *(*[]apiextensions.CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.Conversion = (*apiextensions.CustomResourceConversion)(unsafe.Pointer(in.Conversion)) return nil } @@ -439,8 +502,19 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes out.Validation = nil } out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.Versions = *(*[]CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) 
+ for i := range *in { + if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil + } out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) + out.Conversion = (*CustomResourceConversion)(unsafe.Pointer(in.Conversion)) return nil } @@ -481,6 +555,17 @@ func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_Custom out.Name = in.Name out.Served = in.Served out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(apiextensions.CustomResourceValidation) + if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) return nil } @@ -493,6 +578,17 @@ func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_Custom out.Name = in.Name out.Served = in.Served out.Storage = in.Storage + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil { + return err + } + } else { + out.Schema = nil + } + out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) return nil } @@ -1125,3 +1221,51 @@ func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchem func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in, out, s) } + +func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. +func Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { + return autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) +} + +func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. 
+func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in, out, s) +} + +func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*apiextensions.ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) +} + +func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function. +func Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { + return autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 5e14efbc84..8dd7a87bfc 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -24,6 +24,88 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. +func (in *ConversionRequest) DeepCopy() *ConversionRequest { + if in == nil { + return nil + } + out := new(ConversionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { + *out = *in + if in.ConvertedObjects != nil { + in, out := &in.ConvertedObjects, &out.ConvertedObjects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Result.DeepCopyInto(&out.Result) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. 
+func (in *ConversionResponse) DeepCopy() *ConversionResponse { + if in == nil { + return nil + } + out := new(ConversionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(ConversionRequest) + (*in).DeepCopyInto(*out) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(ConversionResponse) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. +func (in *ConversionReview) DeepCopy() *ConversionReview { + if in == nil { + return nil + } + out := new(ConversionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConversionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { *out = *in @@ -40,6 +122,27 @@ func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefini return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. +func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in @@ -161,13 +264,20 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti if in.Versions != nil { in, out := &in.Versions, &out.Versions *out = make([]CustomResourceDefinitionVersion, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.AdditionalPrinterColumns != nil { in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } return } @@ -213,6 +323,21 @@ func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionSt // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } return } @@ -468,3 +593,55 @@ func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 50e7ee880e..5a01307539 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -40,6 +40,27 @@ func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefini return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { + *out = *in + if in.WebhookClientConfig != nil { + in, out := &in.WebhookClientConfig, &out.WebhookClientConfig + *out = new(WebhookClientConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. +func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { + if in == nil { + return nil + } + out := new(CustomResourceConversion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in @@ -161,13 +182,20 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti if in.Versions != nil { in, out := &in.Versions, &out.Versions *out = make([]CustomResourceDefinitionVersion, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.AdditionalPrinterColumns != nil { in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns *out = make([]CustomResourceColumnDefinition, len(*in)) copy(*out, *in) } + if in.Conversion != nil { + in, out := &in.Conversion, &out.Conversion + *out = new(CustomResourceConversion) + (*in).DeepCopyInto(*out) + } return } @@ -213,6 +241,21 @@ func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionSt // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = new(CustomResourceValidation) + (*in).DeepCopyInto(*out) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = new(CustomResourceSubresources) + (*in).DeepCopyInto(*out) + } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } return } @@ -447,3 +490,55 @@ func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. 
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index f25a6ce345..c925313a7c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -19,6 +19,8 @@ limitations under the License. package v1beta1 import ( + "time" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -73,10 +75,15 @@ func (c *customResourceDefinitions) Get(name string, options v1.GetOptions) (res // List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. func (c *customResourceDefinitions) List(opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } result = &v1beta1.CustomResourceDefinitionList{} err = c.client.Get(). Resource("customresourcedefinitions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Do(). Into(result) return @@ -84,10 +91,15 @@ func (c *customResourceDefinitions) List(opts v1.ListOptions) (result *v1beta1.C // Watch returns a watch.Interface that watches the requested customResourceDefinitions. func (c *customResourceDefinitions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } opts.Watch = true return c.client.Get(). Resource("customresourcedefinitions"). VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). Watch() } @@ -141,9 +153,14 @@ func (c *customResourceDefinitions) Delete(name string, options *v1.DeleteOption // DeleteCollection deletes a collection of objects. func (c *customResourceDefinitions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } return c.client.Delete(). Resource("customresourcedefinitions"). VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/sigs.k8s.io/controller-tools/LICENSE b/vendor/sigs.k8s.io/controller-tools/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go new file mode 100644 index 0000000000..f8b98fd1d5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "fmt" + "log" + "os" + "path" + "strings" + + "github.com/ghodss/yaml" + "github.com/spf13/afero" + extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/gengo/args" + "k8s.io/gengo/types" + crdutil "sigs.k8s.io/controller-tools/pkg/crd/util" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" + "sigs.k8s.io/controller-tools/pkg/internal/codegen/parse" + "sigs.k8s.io/controller-tools/pkg/util" +) + +// Generator generates CRD manifests from API resource definitions defined in Go source files. +type Generator struct { + RootPath string + OutputDir string + Repo string + Domain string + Namespace string + SkipMapValidation bool + + // OutFs is filesystem to be used for writing out the result + OutFs afero.Fs + + // apisPkg is the absolute Go pkg name for current project's 'pkg/apis' pkg. + // This is needed to determine if a Type belongs to the project or it is a referred Type. + apisPkg string + + // APIsPath and APIsPkg allow customized generation for Go types existing under directories other than pkg/apis + APIsPath string + APIsPkg string +} + +// ValidateAndInitFields validate and init generator fields. +func (c *Generator) ValidateAndInitFields() error { + var err error + + if c.OutFs == nil { + c.OutFs = afero.NewOsFs() + } + + if len(c.RootPath) == 0 { + // Take current path as root path if not specified. + c.RootPath, err = os.Getwd() + if err != nil { + return err + } + } + + // Validate PROJECT file if Domain or Repo are not set manually + if len(c.Domain) == 0 || len(c.Repo) == 0 { + if !crdutil.PathHasProjectFile(c.RootPath) { + return fmt.Errorf("PROJECT file missing in dir %s", c.RootPath) + } + } + + if len(c.Repo) == 0 { + c.Repo = crdutil.GetRepoFromProject(c.RootPath) + } + + // If Domain is not explicitly specified, + // try to search for PROJECT file as a basis. + if len(c.Domain) == 0 { + c.Domain = crdutil.GetDomainFromProject(c.RootPath) + } + + err = c.setAPIsPkg() + if err != nil { + return err + } + + // Init output directory + if c.OutputDir == "" { + c.OutputDir = path.Join(c.RootPath, "config/crds") + } + + return nil +} + +// Do manages CRD generation. +func (c *Generator) Do() error { + arguments := args.Default() + b, err := arguments.NewBuilder() + if err != nil { + return fmt.Errorf("failed making a parser: %v", err) + } + + // Switch working directory to root path. 
+ wd, err := os.Getwd() + if err != nil { + return err + } + if err := os.Chdir(c.RootPath); err != nil { + return fmt.Errorf("failed switching working dir: %v", err) + } + defer func() { + if err := os.Chdir(wd); err != nil { + log.Fatalf("Failed to switch back to original working dir: %v", err) + } + }() + + if err := b.AddDirRecursive(fmt.Sprintf("%s/%s", c.Repo, c.APIsPath)); err != nil { + return fmt.Errorf("failed making a parser: %v", err) + } + + ctx, err := parse.NewContext(b) + if err != nil { + return fmt.Errorf("failed making a context: %v", err) + } + + arguments.CustomArgs = &parse.Options{SkipMapValidation: c.SkipMapValidation} + + // TODO: find an elegant way to fulfill the domain in APIs. + p := parse.NewAPIs(ctx, arguments, c.Domain, c.apisPkg) + crds := c.getCrds(p) + + return c.writeCRDs(crds) +} + +func (c *Generator) writeCRDs(crds map[string][]byte) error { + // Ensure output dir exists. + if err := c.OutFs.MkdirAll(c.OutputDir, os.FileMode(0700)); err != nil { + return err + } + + for file, crd := range crds { + outFile := path.Join(c.OutputDir, file) + if err := (&util.FileWriter{Fs: c.OutFs}).WriteFile(outFile, crd); err != nil { + return err + } + } + return nil +} + +func getCRDFileName(resource *codegen.APIResource) string { + elems := []string{resource.Group, resource.Version, strings.ToLower(resource.Kind)} + return strings.Join(elems, "_") + ".yaml" +} + +func (c *Generator) getCrds(p *parse.APIs) map[string][]byte { + crds := map[string]extensionsv1beta1.CustomResourceDefinition{} + for _, g := range p.APIs.Groups { + for _, v := range g.Versions { + for _, r := range v.Resources { + crd := r.CRD + // ignore types which do not belong to this project + if !c.belongsToAPIsPkg(r.Type) { + continue + } + if len(c.Namespace) > 0 { + crd.Namespace = c.Namespace + } + fileName := getCRDFileName(r) + crds[fileName] = crd + } + } + } + + result := map[string][]byte{} + for file, crd := range crds { + b, err := yaml.Marshal(crd) + if err != nil { + log.Fatalf("Error: %v", err) + } + result[file] = b + } + + return result +} + +// belongsToAPIsPkg returns true if type t is defined under pkg/apis pkg of +// current project. +func (c *Generator) belongsToAPIsPkg(t *types.Type) bool { + return strings.HasPrefix(t.Name.Package, c.apisPkg) +} + +func (c *Generator) setAPIsPkg() error { + if c.APIsPath == "" { + c.APIsPath = "pkg/apis" + } + + c.apisPkg = c.APIsPkg + if c.apisPkg == "" { + // Validate apis directory exists under working path + apisPath := path.Join(c.RootPath, c.APIsPath) + if _, err := os.Stat(apisPath); err != nil { + return fmt.Errorf("error validating apis path %s: %v", apisPath, err) + } + + c.apisPkg = path.Join(c.Repo, c.APIsPath) + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go new file mode 100644 index 0000000000..559e602278 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bufio" + "fmt" + gobuild "go/build" + "log" + "os" + "path" + "path/filepath" + "strings" +) + +// IsGoSrcPath validate if given path is of path $GOPATH/src. +func IsGoSrcPath(filePath string) bool { + for _, gopath := range getGoPaths() { + goSrc := path.Join(gopath, "src") + if filePath == goSrc { + return true + } + } + + return false +} + +// IsUnderGoSrcPath validate if given path is under path $GOPATH/src. +func IsUnderGoSrcPath(filePath string) bool { + for _, gopath := range getGoPaths() { + goSrc := path.Join(gopath, "src") + if strings.HasPrefix(filepath.Dir(filePath), goSrc) { + return true + } + } + + return false +} + +// DirToGoPkg returns the Gopkg for the given directory if it exists +// under a GOPATH otherwise returns error. For example, +// /Users/x/go/src/github.com/y/z ==> github.com/y/z +func DirToGoPkg(dir string) (pkg string, err error) { + goPaths := getGoPaths() + for _, gopath := range goPaths { + goSrc := path.Join(gopath, "src") + if !strings.HasPrefix(dir, goSrc) { + continue + } + pkg, err := filepath.Rel(goSrc, dir) + if err == nil { + return pkg, err + } + } + + return "", fmt.Errorf("dir '%s' does not exist under any GOPATH %v", dir, goPaths) +} + +func getGoPaths() []string { + gopaths := os.Getenv("GOPATH") + if len(gopaths) == 0 { + gopaths = gobuild.Default.GOPATH + } + return filepath.SplitList(gopaths) +} + +// PathHasProjectFile validate if PROJECT file exists under the path. +func PathHasProjectFile(filePath string) bool { + if _, err := os.Stat(path.Join(filePath, "PROJECT")); os.IsNotExist(err) { + return false + } + + return true +} + +// GetDomainFromProject get domain information from the PROJECT file under the path. +func GetDomainFromProject(rootPath string) string { + return GetFieldFromProject("domain", rootPath) +} + +// GetRepoFromProject get domain information from the PROJECT file under the path. +func GetRepoFromProject(rootPath string) string { + return GetFieldFromProject("repo", rootPath) +} + +// GetFieldFromProject get field information from the PROJECT file under the path. +func GetFieldFromProject(fieldKey string, rootPath string) string { + var fieldVal string + + file, err := os.Open(path.Join(rootPath, "PROJECT")) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := file.Close(); err != nil { + log.Fatal(err) + } + }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), fmt.Sprintf("%s:", fieldKey)) { + fieldInfo := strings.Split(scanner.Text(), ":") + if len(fieldInfo) != 2 { + log.Fatalf("Unexpected %s info: %s", fieldKey, scanner.Text()) + } + fieldVal = strings.Replace(fieldInfo[1], " ", "", -1) + break + } + } + if len(fieldVal) == 0 { + log.Fatalf("%s/PROJECT file is missing value for '%s'", rootPath, fieldKey) + } + + return fieldVal +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go new file mode 100644 index 0000000000..c953b4b3b6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go @@ -0,0 +1,287 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "fmt" + "path" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" +) + +type genUnversionedType struct { + Type *types.Type + Resource *codegen.APIResource +} + +func (b *APIs) parseAPIs() { + apis := &codegen.APIs{ + Domain: b.Domain, + Package: b.APIsPkg, + Groups: map[string]*codegen.APIGroup{}, + Rules: b.Rules, + Informers: b.Informers, + } + + for group, versionMap := range b.ByGroupVersionKind { + apiGroup := &codegen.APIGroup{ + Group: group, + GroupTitle: strings.Title(group), + Domain: b.Domain, + Versions: map[string]*codegen.APIVersion{}, + UnversionedResources: map[string]*codegen.APIResource{}, + } + + for version, kindMap := range versionMap { + apiVersion := &codegen.APIVersion{ + Domain: b.Domain, + Group: group, + Version: version, + Resources: map[string]*codegen.APIResource{}, + } + for kind, resource := range kindMap { + apiResource := &codegen.APIResource{ + Domain: resource.Domain, + Version: resource.Version, + Group: resource.Group, + Resource: resource.Resource, + Type: resource.Type, + REST: resource.REST, + Kind: resource.Kind, + Subresources: resource.Subresources, + StatusStrategy: resource.StatusStrategy, + Strategy: resource.Strategy, + NonNamespaced: resource.NonNamespaced, + ShortName: resource.ShortName, + } + parseDoc(resource, apiResource) + apiVersion.Resources[kind] = apiResource + // Set the package for the api version + apiVersion.Pkg = b.context.Universe[resource.Type.Name.Package] + // Set the package for the api group + apiGroup.Pkg = b.context.Universe[filepath.Dir(resource.Type.Name.Package)] + if apiGroup.Pkg != nil { + apiGroup.PkgPath = apiGroup.Pkg.Path + } + + apiGroup.UnversionedResources[kind] = apiResource + } + + apiGroup.Versions[version] = apiVersion + } + b.parseStructs(apiGroup) + apis.Groups[group] = apiGroup + } + apis.Pkg = b.context.Universe[b.APIsPkg] + b.APIs = apis +} + +func (b *APIs) parseStructs(apigroup *codegen.APIGroup) { + remaining := []genUnversionedType{} + for _, version := range apigroup.Versions { + for _, resource := range version.Resources { + remaining = append(remaining, genUnversionedType{resource.Type, resource}) + } + } + for _, version := range b.SubByGroupVersionKind[apigroup.Group] { + for _, kind := range version { + remaining = append(remaining, genUnversionedType{kind, nil}) + } + } + + done := sets.String{} + for len(remaining) > 0 { + // Pop the next element from the list + next := remaining[0] + remaining[0] = remaining[len(remaining)-1] + remaining = remaining[:len(remaining)-1] + + // Already processed this type. 
Skip it + if done.Has(next.Type.Name.Name) { + continue + } + done.Insert(next.Type.Name.Name) + + // Generate the struct and append to the list + result, additionalTypes := parseType(next.Type) + + // This is a resource, so generate the client + if b.genClient(next.Type) { + result.GenClient = true + result.GenDeepCopy = true + } + + if next.Resource != nil { + result.NonNamespaced = IsNonNamespaced(next.Type) + } + + if b.genDeepCopy(next.Type) { + result.GenDeepCopy = true + } + apigroup.Structs = append(apigroup.Structs, result) + + // Add the newly discovered subtypes + for _, at := range additionalTypes { + remaining = append(remaining, genUnversionedType{at, nil}) + } + } +} + +// parseType parses the type into a Struct, and returns a list of types that +// need to be parsed +func parseType(t *types.Type) (*codegen.Struct, []*types.Type) { + remaining := []*types.Type{} + + s := &codegen.Struct{ + Name: t.Name.Name, + GenClient: false, + GenUnversioned: true, // Generate unversioned structs by default + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+genregister:unversioned=false") { + // Don't generate the unversioned struct + s.GenUnversioned = false + } + } + + for _, member := range t.Members { + uType := member.Type.Name.Name + memberName := member.Name + uImport := "" + + // Use the element type for Pointers, Maps and Slices + mSubType := member.Type + hasElem := false + for mSubType.Elem != nil { + mSubType = mSubType.Elem + hasElem = true + } + if hasElem { + // Strip the package from the field type + uType = strings.Replace(member.Type.String(), mSubType.Name.Package+".", "", 1) + } + + base := filepath.Base(member.Type.String()) + samepkg := t.Name.Package == mSubType.Name.Package + + // If not in the same package, calculate the import pkg + if !samepkg { + parts := strings.Split(base, ".") + if len(parts) > 1 { + // Don't generate unversioned types for core types, just use the versioned types + if strings.HasPrefix(mSubType.Name.Package, "k8s.io/api/") { + // Import the package under an alias so it doesn't conflict with other groups + // having the same version + importAlias := path.Base(path.Dir(mSubType.Name.Package)) + path.Base(mSubType.Name.Package) + uImport = fmt.Sprintf("%s \"%s\"", importAlias, mSubType.Name.Package) + if hasElem { + // Replace the full package with the alias when referring to the type + uType = strings.Replace(member.Type.String(), mSubType.Name.Package, importAlias, 1) + } else { + // Replace the full package with the alias when referring to the type + uType = fmt.Sprintf("%s.%s", importAlias, parts[1]) + } + } else { + switch member.Type.Name.Package { + case "k8s.io/apimachinery/pkg/apis/meta/v1": + // Use versioned types for meta/v1 + uImport = fmt.Sprintf("%s \"%s\"", "metav1", "k8s.io/apimachinery/pkg/apis/meta/v1") + uType = "metav1." + parts[1] + default: + // Use unversioned types for everything else + t := member.Type + + if t.Elem != nil { + // handle Pointers, Maps, Slices + + // We need to parse the package from the Type String + t = t.Elem + str := member.Type.String() + startPkg := strings.LastIndexAny(str, "*]") + endPkg := strings.LastIndexAny(str, ".") + pkg := str[startPkg+1 : endPkg] + name := str[endPkg+1:] + prefix := str[:startPkg+1] + + uImportBase := path.Base(pkg) + uImportName := path.Base(path.Dir(pkg)) + uImportBase + uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) + + uType = prefix + uImportName + "." 
+ name + } else { + // handle non- Pointer, Maps, Slices + pkg := t.Name.Package + name := t.Name.Name + + // Come up with the alias the package is imported under + // Concatenate with directory package to reduce naming collisions + uImportBase := path.Base(pkg) + uImportName := path.Base(path.Dir(pkg)) + uImportBase + + // Create the import statement + uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) + + // Create the field type name - should be . + uType = uImportName + "." + name + } + } + } + } + } + + if member.Embedded { + memberName = "" + } + + s.Fields = append(s.Fields, &codegen.Field{ + Name: memberName, + VersionedPackage: member.Type.Name.Package, + UnversionedImport: uImport, + UnversionedType: uType, + }) + + // Add this member Type for processing if it isn't a primitive and + // is part of the same API group + if !mSubType.IsPrimitive() && GetGroup(mSubType) == GetGroup(t) { + remaining = append(remaining, mSubType) + } + } + return s, remaining +} + +func (b *APIs) genClient(c *types.Type) bool { + comments := Comments(c.CommentLines) + resource := comments.getTag("resource", ":") + comments.getTag("kubebuilder:resource", ":") + return len(resource) > 0 +} + +func (b *APIs) genDeepCopy(c *types.Type) bool { + comments := Comments(c.CommentLines) + return comments.hasTag("subresource-request") +} + +func parseDoc(resource, apiResource *codegen.APIResource) { + if HasDocAnnotation(resource.Type) { + resource.DocAnnotation = getDocAnnotation(resource.Type, "warning", "note") + apiResource.DocAnnotation = resource.DocAnnotation + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go new file mode 100644 index 0000000000..98493540f9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "k8s.io/gengo/generator" + "k8s.io/gengo/namer" + "k8s.io/gengo/parser" +) + +// NewContext returns a new Context from the builder +func NewContext(p *parser.Builder) (*generator.Context, error) { + return generator.NewContext(p, NameSystems(), DefaultNameSystem()) +} + +// DefaultNameSystem returns public by default. +func DefaultNameSystem() string { + return "public" +} + +// NameSystems returns the name system used by the generators in this package. +// e.g. black-magic +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(1), + "raw": namer.NewRawNamer("", nil), + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go new file mode 100644 index 0000000000..e2082b5b7d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go @@ -0,0 +1,656 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "text/template" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" +) + +// parseCRDs populates the CRD field of each Group.Version.Resource, +// creating validations using the annotations on type fields. +func (b *APIs) parseCRDs() { + for _, group := range b.APIs.Groups { + for _, version := range group.Versions { + for _, resource := range version.Resources { + if IsAPIResource(resource.Type) { + resource.JSONSchemaProps, resource.Validation = + b.typeToJSONSchemaProps(resource.Type, sets.NewString(), []string{}, true) + + // Note: Drop the Type field at the root level of validation + // schema. Refer to following issue for details. + // https://github.com/kubernetes/kubernetes/issues/65293 + resource.JSONSchemaProps.Type = "" + j, err := json.MarshalIndent(resource.JSONSchemaProps, "", " ") + if err != nil { + log.Fatalf("Could not Marshall validation %v\n", err) + } + resource.ValidationComments = string(j) + + resource.CRD = v1beta1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apiextensions.k8s.io/v1beta1", + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s.%s", resource.Resource, resource.Group, resource.Domain), + Labels: map[string]string{"controller-tools.k8s.io": "1.0"}, + }, + Spec: v1beta1.CustomResourceDefinitionSpec{ + Group: fmt.Sprintf("%s.%s", resource.Group, resource.Domain), + Version: resource.Version, + Names: v1beta1.CustomResourceDefinitionNames{ + Kind: resource.Kind, + Plural: resource.Resource, + }, + Validation: &v1beta1.CustomResourceValidation{ + OpenAPIV3Schema: &resource.JSONSchemaProps, + }, + }, + } + if resource.NonNamespaced { + resource.CRD.Spec.Scope = "Cluster" + } else { + resource.CRD.Spec.Scope = "Namespaced" + } + + if hasCategories(resource.Type) { + categoriesTag := getCategoriesTag(resource.Type) + categories := strings.Split(categoriesTag, ",") + resource.CRD.Spec.Names.Categories = categories + resource.Categories = categories + } + + if hasSingular(resource.Type) { + singularName := getSingularName(resource.Type) + resource.CRD.Spec.Names.Singular = singularName + } + + if hasStatusSubresource(resource.Type) { + if resource.CRD.Spec.Subresources == nil { + resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} + } + resource.CRD.Spec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{} + } + + resource.CRD.Status.Conditions = []v1beta1.CustomResourceDefinitionCondition{} + resource.CRD.Status.StoredVersions = []string{} + + if hasScaleSubresource(resource.Type) { + if resource.CRD.Spec.Subresources == nil { + resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} + } + jsonPath, err := parseScaleParams(resource.Type) + if err != nil { + log.Fatalf("failed in parsing 
CRD, error: %v", err.Error()) + } + resource.CRD.Spec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{ + SpecReplicasPath: jsonPath[specReplicasPath], + StatusReplicasPath: jsonPath[statusReplicasPath], + } + labelSelctor, ok := jsonPath[labelSelectorPath] + if ok && labelSelctor != "" { + resource.CRD.Spec.Subresources.Scale.LabelSelectorPath = &labelSelctor + } + } + if hasPrintColumn(resource.Type) { + result, err := parsePrintColumnParams(resource.Type) + if err != nil { + log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error()) + } + resource.CRD.Spec.AdditionalPrinterColumns = result + } + if len(resource.ShortName) > 0 { + resource.CRD.Spec.Names.ShortNames = strings.Split(resource.ShortName, ";") + } + } + } + } + } +} + +func (b *APIs) getTime() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", + Format: "date-time", +}` +} + +func (b *APIs) getDuration() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", +}` +} + +func (b *APIs) getQuantity() string { + return `v1beta1.JSONSchemaProps{ + Type: "string", +}` +} + +func (b *APIs) objSchema() string { + return `v1beta1.JSONSchemaProps{ + Type: "object", +}` +} + +// typeToJSONSchemaProps returns a JSONSchemaProps object and its serialization +// in Go that describe the JSONSchema validations for the given type. +func (b *APIs) typeToJSONSchemaProps(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { + // Special cases + time := types.Name{Name: "Time", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + duration := types.Name{Name: "Duration", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + quantity := types.Name{Name: "Quantity", Package: "k8s.io/apimachinery/pkg/api/resource"} + meta := types.Name{Name: "ObjectMeta", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} + unstructured := types.Name{Name: "Unstructured", Package: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"} + rawExtension := types.Name{Name: "RawExtension", Package: "k8s.io/apimachinery/pkg/runtime"} + intOrString := types.Name{Name: "IntOrString", Package: "k8s.io/apimachinery/pkg/util/intstr"} + // special types first + specialTypeProps := v1beta1.JSONSchemaProps{ + Description: parseDescription(comments), + } + for _, l := range comments { + getValidation(l, &specialTypeProps) + } + switch t.Name { + case time: + specialTypeProps.Type = "string" + specialTypeProps.Format = "date-time" + return specialTypeProps, b.getTime() + case duration: + specialTypeProps.Type = "string" + return specialTypeProps, b.getDuration() + case quantity: + specialTypeProps.Type = "string" + return specialTypeProps, b.getQuantity() + case meta, unstructured, rawExtension: + specialTypeProps.Type = "object" + return specialTypeProps, b.objSchema() + case intOrString: + specialTypeProps.AnyOf = []v1beta1.JSONSchemaProps{ + { + Type: "string", + }, + { + Type: "integer", + }, + } + return specialTypeProps, b.objSchema() + } + + var v v1beta1.JSONSchemaProps + var s string + switch t.Kind { + case types.Builtin: + v, s = b.parsePrimitiveValidation(t, found, comments) + case types.Struct: + v, s = b.parseObjectValidation(t, found, comments, isRoot) + case types.Map: + v, s = b.parseMapValidation(t, found, comments) + case types.Slice: + v, s = b.parseArrayValidation(t, found, comments) + case types.Array: + v, s = b.parseArrayValidation(t, found, comments) + case types.Pointer: + v, s = b.typeToJSONSchemaProps(t.Elem, found, comments, false) + case types.Alias: + v, s = 
b.typeToJSONSchemaProps(t.Underlying, found, comments, false) + default: + log.Fatalf("Unknown supported Kind %v\n", t.Kind) + } + + return v, s +} + +var jsonRegex = regexp.MustCompile("json:\"([a-zA-Z0-9,]+)\"") + +type primitiveTemplateArgs struct { + v1beta1.JSONSchemaProps + Value string + Format string + EnumValue string // TODO check type of enum value to match the type of field + Description string +} + +var primitiveTemplate = template.Must(template.New("map-template").Parse( + `v1beta1.JSONSchemaProps{ + {{ if .Pattern -}} + Pattern: "{{ .Pattern }}", + {{ end -}} + {{ if .Maximum -}} + Maximum: getFloat({{ .Maximum }}), + {{ end -}} + {{ if .ExclusiveMaximum -}} + ExclusiveMaximum: {{ .ExclusiveMaximum }}, + {{ end -}} + {{ if .Minimum -}} + Minimum: getFloat({{ .Minimum }}), + {{ end -}} + {{ if .ExclusiveMinimum -}} + ExclusiveMinimum: {{ .ExclusiveMinimum }}, + {{ end -}} + Type: "{{ .Value }}", + {{ if .Format -}} + Format: "{{ .Format }}", + {{ end -}} + {{ if .EnumValue -}} + Enum: {{ .EnumValue }}, + {{ end -}} + {{ if .MaxLength -}} + MaxLength: getInt({{ .MaxLength }}), + {{ end -}} + {{ if .MinLength -}} + MinLength: getInt({{ .MinLength }}), + {{ end -}} + {{ if .Nullable -}} + Nullable: true, + {{ end -}} +}`)) + +// parsePrimitiveValidation returns a JSONSchemaProps object and its +// serialization in Go that describe the validations for the given primitive +// type. +func (b *APIs) parsePrimitiveValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + props := v1beta1.JSONSchemaProps{Type: string(t.Name.Name)} + + for _, l := range comments { + getValidation(l, &props) + } + + buff := &bytes.Buffer{} + + var n, f, s, d string + switch t.Name.Name { + case "int", "int64", "uint64": + n = "integer" + f = "int64" + case "int32", "uint32": + n = "integer" + f = "int32" + case "float", "float32": + n = "number" + f = "float" + case "float64": + n = "number" + f = "double" + case "bool": + n = "boolean" + case "string": + n = "string" + f = props.Format + default: + n = t.Name.Name + } + if props.Enum != nil { + s = parseEnumToString(props.Enum) + } + d = parseDescription(comments) + if err := primitiveTemplate.Execute(buff, primitiveTemplateArgs{props, n, f, s, d}); err != nil { + log.Fatalf("%v", err) + } + props.Type = n + props.Format = f + props.Description = d + return props, buff.String() +} + +type mapTempateArgs struct { + Result string + SkipMapValidation bool + Nullable bool +} + +var mapTemplate = template.Must(template.New("map-template").Parse( + `v1beta1.JSONSchemaProps{ + Type: "object", + {{if not .SkipMapValidation}}AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &{{.Result}}, + },{{end}} + {{ if .Nullable -}} + Nullable: true, + {{ end -}} +}`)) + +// parseMapValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given map type. 
+func (b *APIs) parseMapValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + additionalProps, result := b.typeToJSONSchemaProps(t.Elem, found, nil, false) + additionalProps.Description = "" + props := v1beta1.JSONSchemaProps{ + Type: "object", + Description: parseDescription(comments), + } + parseOption := b.arguments.CustomArgs.(*Options) + if !parseOption.SkipMapValidation { + props.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &additionalProps} + } + + for _, l := range comments { + getValidation(l, &props) + } + + buff := &bytes.Buffer{} + if err := mapTemplate.Execute(buff, mapTempateArgs{Result: result, SkipMapValidation: parseOption.SkipMapValidation, Nullable: props.Nullable}); err != nil { + log.Fatalf("%v", err) + } + return props, buff.String() +} + +var arrayTemplate = template.Must(template.New("array-template").Parse( + `v1beta1.JSONSchemaProps{ + Type: "{{.Type}}", + {{ if .Format -}} + Format: "{{.Format}}", + {{ end -}} + {{ if .MaxItems -}} + MaxItems: getInt({{ .MaxItems }}), + {{ end -}} + {{ if .MinItems -}} + MinItems: getInt({{ .MinItems }}), + {{ end -}} + {{ if .UniqueItems -}} + UniqueItems: {{ .UniqueItems }}, + {{ end -}} + {{ if .Items -}} + Items: &v1beta1.JSONSchemaPropsOrArray{ + Schema: &{{.ItemsSchema}}, + }, + {{ end -}} + {{ if .Nullable -}} + Nullable: true, + {{ end -}} +}`)) + +type arrayTemplateArgs struct { + v1beta1.JSONSchemaProps + ItemsSchema string +} + +// parseArrayValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given array type. +func (b *APIs) parseArrayValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { + items, result := b.typeToJSONSchemaProps(t.Elem, found, nil, false) + items.Description = "" + props := v1beta1.JSONSchemaProps{ + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{Schema: &items}, + Description: parseDescription(comments), + } + // To represent byte arrays in the generated code, the property of the OpenAPI definition + // should have string as its type and byte as its format. + if t.Name.Name == "[]byte" { + props.Type = "string" + props.Format = "byte" + props.Items = nil + props.Description = parseDescription(comments) + } + for _, l := range comments { + getValidation(l, &props) + } + if t.Name.Name != "[]byte" { + // Except for the byte array special case above, the "format" property + // should be applied to the array items and not the array itself. + props.Format = "" + } + buff := &bytes.Buffer{} + if err := arrayTemplate.Execute(buff, arrayTemplateArgs{props, result}); err != nil { + log.Fatalf("%v", err) + } + return props, buff.String() +} + +type objectTemplateArgs struct { + v1beta1.JSONSchemaProps + Fields map[string]string + Required []string + IsRoot bool +} + +var objectTemplate = template.Must(template.New("object-template").Parse( + `v1beta1.JSONSchemaProps{ + {{ if not .IsRoot -}} + Type: "object", + {{ end -}} + Properties: map[string]v1beta1.JSONSchemaProps{ + {{ range $k, $v := .Fields -}} + "{{ $k }}": {{ $v }}, + {{ end -}} + }, + {{if .Required}}Required: []string{ + {{ range $k, $v := .Required -}} + "{{ $v }}", + {{ end -}} + },{{ end -}} + {{ if .Nullable -}} + Nullable: true, + {{ end -}} +}`)) + +// parseObjectValidation returns a JSONSchemaProps object and its serialization in +// Go that describe the validations for the given object type. 
+func (b *APIs) parseObjectValidation(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { + buff := &bytes.Buffer{} + props := v1beta1.JSONSchemaProps{ + Type: "object", + Description: parseDescription(comments), + } + + for _, l := range comments { + getValidation(l, &props) + } + + if strings.HasPrefix(t.Name.String(), "k8s.io/api") { + if err := objectTemplate.Execute(buff, objectTemplateArgs{props, nil, nil, false}); err != nil { + log.Fatalf("%v", err) + } + } else { + m, result, required := b.getMembers(t, found) + props.Properties = m + props.Required = required + + if err := objectTemplate.Execute(buff, objectTemplateArgs{props, result, required, isRoot}); err != nil { + log.Fatalf("%v", err) + } + } + return props, buff.String() +} + +// getValidation parses the validation tags from the comment and sets the +// validation rules on the given JSONSchemaProps. +func getValidation(comment string, props *v1beta1.JSONSchemaProps) { + if strings.TrimSpace(comment) == "+nullable" { + props.Nullable = true + } + + comment = strings.TrimLeft(comment, " ") + if !strings.HasPrefix(comment, "+kubebuilder:validation:") { + return + } + c := strings.Replace(comment, "+kubebuilder:validation:", "", -1) + parts := strings.Split(c, "=") + if len(parts) != 2 { + log.Fatalf("Expected +kubebuilder:validation:= actual: %s", comment) + return + } + switch parts[0] { + case "Maximum": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.Maximum = &f + case "ExclusiveMaximum": + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.ExclusiveMaximum = b + case "Minimum": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.Minimum = &f + case "ExclusiveMinimum": + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.ExclusiveMinimum = b + case "MaxLength": + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MaxLength = &v + case "MinLength": + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MinLength = &v + case "Pattern": + props.Pattern = parts[1] + case "MaxItems": + if props.Type == "array" { + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MaxItems = &v + } + case "MinItems": + if props.Type == "array" { + i, err := strconv.Atoi(parts[1]) + v := int64(i) + if err != nil { + log.Fatalf("Could not parse int from %s: %v", comment, err) + return + } + props.MinItems = &v + } + case "UniqueItems": + if props.Type == "array" { + b, err := strconv.ParseBool(parts[1]) + if err != nil { + log.Fatalf("Could not parse bool from %s: %v", comment, err) + return + } + props.UniqueItems = b + } + case "MultipleOf": + f, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + log.Fatalf("Could not parse float from %s: %v", comment, err) + return + } + props.MultipleOf = &f + case "Enum": + if props.Type != "array" { + value := strings.Split(parts[1], ",") + enums := []v1beta1.JSON{} + for _, s := range value 
{ + checkType(props, s, &enums) + } + props.Enum = enums + } + case "Format": + props.Format = parts[1] + default: + log.Fatalf("Unsupport validation: %s", comment) + } +} + +// getMembers builds maps by field name of the JSONSchemaProps and their Go +// serializations. +func (b *APIs) getMembers(t *types.Type, found sets.String) (map[string]v1beta1.JSONSchemaProps, map[string]string, []string) { + members := map[string]v1beta1.JSONSchemaProps{} + result := map[string]string{} + required := []string{} + + // Don't allow recursion until we support it through refs + // TODO: Support recursion + if found.Has(t.Name.String()) { + fmt.Printf("Breaking recursion for type %s", t.Name.String()) + return members, result, required + } + found.Insert(t.Name.String()) + + for _, member := range t.Members { + tags := jsonRegex.FindStringSubmatch(member.Tags) + if len(tags) == 0 { + // Skip fields without json tags + //fmt.Printf("Skipping member %s %s\n", member.Name, member.Type.Name.String()) + continue + } + ts := strings.Split(tags[1], ",") + name := member.Name + strat := "" + if len(ts) > 0 && len(ts[0]) > 0 { + name = ts[0] + } + if len(ts) > 1 { + strat = ts[1] + } + + // Inline "inline" structs + if strat == "inline" { + m, r, re := b.getMembers(member.Type, found) + for n, v := range m { + members[n] = v + } + for n, v := range r { + result[n] = v + } + required = append(required, re...) + } else { + m, r := b.typeToJSONSchemaProps(member.Type, found, member.CommentLines, false) + members[name] = m + result[name] = r + if hasRequired(member) { + required = append(required, name) + } + } + } + + defer found.Delete(t.Name.String()) + return members, result, required +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go new file mode 100644 index 0000000000..a08cf751b5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go @@ -0,0 +1,161 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package parse + +import ( + "fmt" + "log" + "strings" + + "github.com/markbates/inflect" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" + "sigs.k8s.io/controller-tools/pkg/internal/general" +) + +// parseIndex indexes all types with the comment "// +resource=RESOURCE" by GroupVersionKind and +// GroupKindVersion +func (b *APIs) parseIndex() { + // Index resource by group, version, kind + b.ByGroupVersionKind = map[string]map[string]map[string]*codegen.APIResource{} + + // Index resources by group, kind, version + b.ByGroupKindVersion = map[string]map[string]map[string]*codegen.APIResource{} + + // Index subresources by group, version, kind + b.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{} + + for _, c := range b.context.Order { + // The type is a subresource, add it to the subresource index + if IsAPISubresource(c) { + group := GetGroup(c) + version := GetVersion(c, group) + kind := GetKind(c, group) + if _, f := b.SubByGroupVersionKind[group]; !f { + b.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{} + } + if _, f := b.SubByGroupVersionKind[group][version]; !f { + b.SubByGroupVersionKind[group][version] = map[string]*types.Type{} + } + b.SubByGroupVersionKind[group][version][kind] = c + } + + // If it isn't a subresource or resource, continue to the next type + if !IsAPIResource(c) { + continue + } + + // Parse out the resource information + r := &codegen.APIResource{ + Type: c, + NonNamespaced: IsNonNamespaced(c), + } + r.Group = GetGroup(c) + r.Version = GetVersion(c, r.Group) + r.Kind = GetKind(c, r.Group) + r.Domain = b.Domain + + // TODO: revisit the part... + if r.Resource == "" { + rs := inflect.NewDefaultRuleset() + r.Resource = rs.Pluralize(strings.ToLower(r.Kind)) + } + rt, err := parseResourceAnnotation(c) + if err != nil { + log.Fatalf("failed to parse resource annotations, error: %v", err.Error()) + } + if rt.Resource != "" { + r.Resource = rt.Resource + } + r.ShortName = rt.ShortName + + // Copy the Status strategy to mirror the non-status strategy + r.StatusStrategy = strings.TrimSuffix(r.Strategy, "Strategy") + r.StatusStrategy = fmt.Sprintf("%sStatusStrategy", r.StatusStrategy) + + // Initialize the map entries so they aren't nill + if _, f := b.ByGroupKindVersion[r.Group]; !f { + b.ByGroupKindVersion[r.Group] = map[string]map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f { + b.ByGroupKindVersion[r.Group][r.Kind] = map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupVersionKind[r.Group]; !f { + b.ByGroupVersionKind[r.Group] = map[string]map[string]*codegen.APIResource{} + } + if _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f { + b.ByGroupVersionKind[r.Group][r.Version] = map[string]*codegen.APIResource{} + } + + // Add the resource to the map + b.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r + b.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r + r.Type = c + } +} + +// resourceTags contains the tags present in a "+resource=" comment +type resourceTags struct { + Resource string + REST string + Strategy string + ShortName string +} + +// resourceAnnotationValue is a helper function to extract resource annotation. +func resourceAnnotationValue(tag string) (resourceTags, error) { + res := resourceTags{} + for _, elem := range strings.Split(tag, ",") { + key, value, err := general.ParseKV(elem) + if err != nil { + return resourceTags{}, fmt.Errorf("// +kubebuilder:resource: tags must be key value pairs. 
Expected "+ + "keys [path=] "+ + "Got string: [%s]", tag) + } + switch key { + case "path": + res.Resource = value + case "shortName": + res.ShortName = value + default: + return resourceTags{}, fmt.Errorf("The given input %s is invalid", value) + } + } + return res, nil +} + +// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct. +func parseResourceAnnotation(t *types.Type) (resourceTags, error) { + finalResult := resourceTags{} + var resourceAnnotationFound bool + for _, comment := range t.CommentLines { + anno := general.GetAnnotation(comment, "kubebuilder:resource") + if len(anno) == 0 { + continue + } + result, err := resourceAnnotationValue(anno) + if err != nil { + return resourceTags{}, err + } + if resourceAnnotationFound { + return resourceTags{}, fmt.Errorf("resource annotation should only exists once per type") + } + resourceAnnotationFound = true + finalResult = result + } + return finalResult, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go new file mode 100644 index 0000000000..c7a55dd3de --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go @@ -0,0 +1,151 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parse + +import ( + "bufio" + "go/build" + "log" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/args" + "k8s.io/gengo/generator" + "k8s.io/gengo/types" + "sigs.k8s.io/controller-tools/pkg/internal/codegen" +) + +// APIs is the information of a collection of API +type APIs struct { + context *generator.Context + arguments *args.GeneratorArgs + Domain string + VersionedPkgs sets.String + UnversionedPkgs sets.String + APIsPkg string + APIsPkgRaw *types.Package + GroupNames sets.String + + APIs *codegen.APIs + Controllers []codegen.Controller + + ByGroupKindVersion map[string]map[string]map[string]*codegen.APIResource + ByGroupVersionKind map[string]map[string]map[string]*codegen.APIResource + SubByGroupVersionKind map[string]map[string]map[string]*types.Type + Groups map[string]types.Package + Rules []rbacv1.PolicyRule + Informers map[v1.GroupVersionKind]bool +} + +// NewAPIs returns a new APIs instance with given context. 
+func NewAPIs(context *generator.Context, arguments *args.GeneratorArgs, domain, apisPkg string) *APIs { + b := &APIs{ + context: context, + arguments: arguments, + Domain: domain, + APIsPkg: apisPkg, + } + b.parsePackages() + b.parseGroupNames() + b.parseIndex() + b.parseAPIs() + b.parseCRDs() + if len(b.Domain) == 0 { + b.parseDomain() + } + return b +} + +// parseGroupNames initializes b.GroupNames with the set of all groups +func (b *APIs) parseGroupNames() { + b.GroupNames = sets.String{} + for p := range b.UnversionedPkgs { + pkg := b.context.Universe[p] + if pkg == nil { + // If the input had no Go files, for example. + continue + } + b.GroupNames.Insert(filepath.Base(p)) + } +} + +// parsePackages parses out the sets of Versioned, Unversioned packages and identifies the root Apis package. +func (b *APIs) parsePackages() { + b.VersionedPkgs = sets.NewString() + b.UnversionedPkgs = sets.NewString() + for _, o := range b.context.Order { + if IsAPIResource(o) { + versioned := o.Name.Package + b.VersionedPkgs.Insert(versioned) + + unversioned := filepath.Dir(versioned) + b.UnversionedPkgs.Insert(unversioned) + } + } +} + +// parseDomain parses the domain from the apis/doc.go file comment "// +domain=YOUR_DOMAIN". +func (b *APIs) parseDomain() { + pkg := b.context.Universe[b.APIsPkg] + if pkg == nil { + // If the input had no Go files, for example. + panic(errors.Errorf("Missing apis package.")) + } + comments := Comments(pkg.Comments) + b.Domain = comments.getTag("domain", "=") + if len(b.Domain) == 0 { + b.Domain = parseDomainFromFiles(b.context.Inputs) + if len(b.Domain) == 0 { + panic("Could not find string matching // +domain=.+ in apis/doc.go") + } + } +} + +func parseDomainFromFiles(paths []string) string { + var domain string + for _, path := range paths { + if strings.HasSuffix(path, "pkg/apis") { + filePath := strings.Join([]string{build.Default.GOPATH, "src", path, "doc.go"}, "/") + lines := []string{} + + file, err := os.Open(filePath) + if err != nil { + log.Fatal(err) + } + defer file.Close() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + if strings.HasPrefix(scanner.Text(), "//") { + lines = append(lines, strings.Replace(scanner.Text(), "// ", "", 1)) + } + } + if err := scanner.Err(); err != nil { + log.Fatal(err) + } + + comments := Comments(lines) + domain = comments.getTag("domain", "=") + break + } + } + return domain +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go new file mode 100644 index 0000000000..6be15acd56 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go @@ -0,0 +1,551 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package parse + +import ( + "fmt" + "log" + "path/filepath" + "strconv" + "strings" + + "github.com/pkg/errors" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/gengo/types" +) + +const ( + specReplicasPath = "specpath" + statusReplicasPath = "statuspath" + labelSelectorPath = "selectorpath" + jsonPathError = "invalid scale path. specpath, statuspath key-value pairs are required, only selectorpath key-value is optinal. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label" + printColumnName = "name" + printColumnType = "type" + printColumnDescr = "description" + printColumnPath = "JSONPath" + printColumnFormat = "format" + printColumnPri = "priority" + printColumnError = "invalid printcolumn path. name,type, and JSONPath are required kye-value pairs and rest of the fields are optinal. For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status" +) + +// Options contains the parser options +type Options struct { + SkipMapValidation bool + + // SkipRBACValidation flag determines whether to check RBAC annotations + // for the controller or not at parse stage. + SkipRBACValidation bool +} + +// IsAPIResource returns true if either of the two conditions become true: +// 1. t has a +resource/+kubebuilder:resource comment tag +// 2. t has TypeMeta and ObjectMeta in its member list. +func IsAPIResource(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+resource") || strings.Contains(c, "+kubebuilder:resource") { + return true + } + } + + typeMetaFound, objMetaFound := false, false + for _, m := range t.Members { + if m.Name == "TypeMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta" { + typeMetaFound = true + } + if m.Name == "ObjectMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta" { + objMetaFound = true + } + if typeMetaFound && objMetaFound { + return true + } + } + return false +} + +// IsNonNamespaced returns true if t has a +nonNamespaced comment tag +func IsNonNamespaced(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+genclient:nonNamespaced") { + return true + } + } + + for _, c := range t.SecondClosestCommentLines { + if strings.Contains(c, "+genclient:nonNamespaced") { + return true + } + } + + return false +} + +// IsController returns true if t has a +controller or +kubebuilder:controller tag +func IsController(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+controller") || strings.Contains(c, "+kubebuilder:controller") { + return true + } + } + return false +} + +// IsRBAC returns true if t has a +rbac or +kubebuilder:rbac tag +func IsRBAC(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+rbac") || strings.Contains(c, "+kubebuilder:rbac") { + return true + } + } + return false +} + +// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation. 
+func hasPrintColumn(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+printcolumn") || strings.Contains(c, "+kubebuilder:printcolumn") { + return true + } + } + return false +} + +// IsInformer returns true if t has a +informers or +kubebuilder:informers tag +func IsInformer(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+informers") || strings.Contains(c, "+kubebuilder:informers") { + return true + } + } + return false +} + +// IsAPISubresource returns true if t has a +subresource-request comment tag +func IsAPISubresource(t *types.Type) bool { + for _, c := range t.CommentLines { + if strings.Contains(c, "+subresource-request") { + return true + } + } + return false +} + +// HasSubresource returns true if t is an APIResource with one or more Subresources +func HasSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "subresource") { + return true + } + } + return false +} + +// hasStatusSubresource returns true if t is an APIResource annotated with +// +kubebuilder:subresource:status +func hasStatusSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:status") { + return true + } + } + return false +} + +// hasScaleSubresource returns true if t is an APIResource annotated with +// +kubebuilder:subresource:scale +func hasScaleSubresource(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:scale") { + return true + } + } + return false +} + +// hasCategories returns true if t is an APIResource annotated with +// +kubebuilder:categories +func hasCategories(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:categories") { + return true + } + } + return false +} + +// HasDocAnnotation returns true if t is an APIResource with doc annotation +// +kubebuilder:doc +func HasDocAnnotation(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:doc") { + return true + } + } + return false +} + +// hasSingular returns true if t is an APIResource annotated with +// +kubebuilder:singular +func hasSingular(t *types.Type) bool { + if !IsAPIResource(t) { + return false + } + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:singular") { + return true + } + } + return false +} + +// hasRequired returns true if t is annotated with +// +required +func hasRequired(t types.Member) bool { + for _, c := range t.CommentLines { + c = strings.TrimSpace(c) + if c == "+required" { + return true + } + } + return false +} + +// IsUnversioned returns true if t is in given group, and not in versioned path. +func IsUnversioned(t *types.Type, group string) bool { + return IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) && GetGroup(t) == group +} + +// IsVersioned returns true if t is in given group, and in versioned path. +func IsVersioned(t *types.Type, group string) bool { + dir := filepath.Base(filepath.Dir(filepath.Dir(t.Name.Package))) + return IsApisDir(dir) && GetGroup(t) == group +} + +// GetVersion returns version of t. 
+func GetVersion(t *types.Type, group string) string { + if !IsVersioned(t, group) { + panic(errors.Errorf("Cannot get version for unversioned type %v", t.Name)) + } + return filepath.Base(t.Name.Package) +} + +// GetGroup returns group of t. +func GetGroup(t *types.Type) string { + return filepath.Base(GetGroupPackage(t)) +} + +// GetGroupPackage returns group package of t. +func GetGroupPackage(t *types.Type) string { + if IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) { + return t.Name.Package + } + return filepath.Dir(t.Name.Package) +} + +// GetKind returns kind of t. +func GetKind(t *types.Type, group string) string { + if !IsVersioned(t, group) && !IsUnversioned(t, group) { + panic(errors.Errorf("Cannot get kind for type not in group %v", t.Name)) + } + return t.Name.Name +} + +// IsApisDir returns true if a directory path is a Kubernetes api directory +func IsApisDir(dir string) bool { + return dir == "apis" || dir == "api" +} + +// Comments is a structure for using comment tags on go structs and fields +type Comments []string + +// GetTags returns the value for the first comment with a prefix matching "+name=" +// e.g. "+name=foo\n+name=bar" would return "foo" +func (c Comments) getTag(name, sep string) string { + for _, c := range c { + prefix := fmt.Sprintf("+%s%s", name, sep) + if strings.HasPrefix(c, prefix) { + return strings.Replace(c, prefix, "", 1) + } + } + return "" +} + +// hasTag returns true if the Comments has a tag with the given name +func (c Comments) hasTag(name string) bool { + for _, c := range c { + prefix := fmt.Sprintf("+%s", name) + if strings.HasPrefix(c, prefix) { + return true + } + } + return false +} + +// GetTags returns the value for all comments with a prefix and separator. E.g. for "name" and "=" +// "+name=foo\n+name=bar" would return []string{"foo", "bar"} +func (c Comments) getTags(name, sep string) []string { + tags := []string{} + for _, c := range c { + prefix := fmt.Sprintf("+%s%s", name, sep) + if strings.HasPrefix(c, prefix) { + tags = append(tags, strings.Replace(c, prefix, "", 1)) + } + } + return tags +} + +// getCategoriesTag returns the value of the +kubebuilder:categories tags +func getCategoriesTag(c *types.Type) string { + comments := Comments(c.CommentLines) + resource := comments.getTag("kubebuilder:categories", "=") + if len(resource) == 0 { + panic(errors.Errorf("Must specify +kubebuilder:categories comment for type %v", c.Name)) + } + return resource +} + +// getSingularName returns the value of the +kubebuilder:singular tag +func getSingularName(c *types.Type) string { + comments := Comments(c.CommentLines) + singular := comments.getTag("kubebuilder:singular", "=") + if len(singular) == 0 { + panic(errors.Errorf("Must specify a value to use with +kubebuilder:singular comment for type %v", c.Name)) + } + return singular +} + +// getDocAnnotation parse annotations of "+kubebuilder:doc:" with tags of "warning" or "doc" for control generating doc config. +// E.g. 
+kubebuilder:doc:warning=foo +kubebuilder:doc:note=bar +func getDocAnnotation(t *types.Type, tags ...string) map[string]string { + annotation := make(map[string]string) + for _, tag := range tags { + for _, c := range t.CommentLines { + prefix := fmt.Sprintf("+kubebuilder:doc:%s=", tag) + if strings.HasPrefix(c, prefix) { + annotation[tag] = strings.Replace(c, prefix, "", 1) + } + } + } + return annotation +} + +// parseByteValue returns the literal digital number values from a byte array +func parseByteValue(b []byte) string { + elem := strings.Join(strings.Fields(fmt.Sprintln(b)), ",") + elem = strings.TrimPrefix(elem, "[") + elem = strings.TrimSuffix(elem, "]") + return elem +} + +// parseDescription parse comments above each field in the type definition. +func parseDescription(res []string) string { + var temp strings.Builder + var desc string + for _, comment := range res { + if !(strings.Contains(comment, "+kubebuilder") || strings.Contains(comment, "+optional") || strings.Contains(comment, "+required") || strings.Contains(comment, "+nullable")) { + temp.WriteString(comment) + temp.WriteString(" ") + desc = strings.TrimRight(temp.String(), " ") + } + } + return desc +} + +// parseEnumToString returns a representive validated go format string from JSONSchemaProps schema +func parseEnumToString(value []v1beta1.JSON) string { + res := "[]v1beta1.JSON{" + prefix := "v1beta1.JSON{[]byte{" + for _, v := range value { + res = res + prefix + parseByteValue(v.Raw) + "}}," + } + return strings.TrimSuffix(res, ",") + "}" +} + +// check type of enum element value to match type of field +func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) { + + // TODO support more types check + switch props.Type { + case "int", "int64", "uint64": + if _, err := strconv.ParseInt(s, 0, 64); err != nil { + log.Fatalf("Invalid integer value [%v] for a field of integer type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "int32", "unit32": + if _, err := strconv.ParseInt(s, 0, 32); err != nil { + log.Fatalf("Invalid integer value [%v] for a field of integer32 type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "float", "float32": + if _, err := strconv.ParseFloat(s, 32); err != nil { + log.Fatalf("Invalid float value [%v] for a field of float32 type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "float64": + if _, err := strconv.ParseFloat(s, 64); err != nil { + log.Fatalf("Invalid float value [%v] for a field of float type", s) + } + *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) + case "string": + *enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)}) + } +} + +// Scale subresource requires specpath, statuspath, selectorpath key values, represents for JSONPath of +// SpecReplicasPath, StatusReplicasPath, LabelSelectorPath separately. e.g. 
+// +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath= +func parseScaleParams(t *types.Type) (map[string]string, error) { + jsonPath := make(map[string]string) + for _, c := range t.CommentLines { + if strings.Contains(c, "+kubebuilder:subresource:scale") { + paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1) + path := strings.Split(paths, ",") + if len(path) < 2 { + return nil, fmt.Errorf(jsonPathError) + } + for _, s := range path { + kv := strings.Split(s, "=") + if kv[0] == specReplicasPath || kv[0] == statusReplicasPath || kv[0] == labelSelectorPath { + jsonPath[kv[0]] = kv[1] + } else { + return nil, fmt.Errorf(jsonPathError) + } + } + var ok bool + _, ok = jsonPath[specReplicasPath] + if !ok { + return nil, fmt.Errorf(jsonPathError) + } + _, ok = jsonPath[statusReplicasPath] + if !ok { + return nil, fmt.Errorf(jsonPathError) + } + return jsonPath, nil + } + } + return nil, fmt.Errorf(jsonPathError) +} + +// printColumnKV parses key-value string formatted as "foo=bar" and returns key and value. +func printColumnKV(s string) (key, value string, err error) { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + err = fmt.Errorf("invalid key value pair") + return key, value, err + } + key, value = kv[0], kv[1] + if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { + value = value[1 : len(value)-1] + } + return key, value, err +} + +// helperPrintColumn is a helper function for the parsePrintColumnParams to compute printer columns. +func helperPrintColumn(parts string, comment string) (v1beta1.CustomResourceColumnDefinition, error) { + config := v1beta1.CustomResourceColumnDefinition{} + var count int + part := strings.Split(parts, ",") + if len(part) < 3 { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) + } + + for _, elem := range strings.Split(parts, ",") { + key, value, err := printColumnKV(elem) + if err != nil { + return v1beta1.CustomResourceColumnDefinition{}, + fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs.Expected "+ + "keys [name=,type=,description=,format=] "+ + "Got string: [%s]", parts) + } + if key == printColumnName || key == printColumnType || key == printColumnPath { + count++ + } + switch key { + case printColumnName: + config.Name = value + case printColumnType: + if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" { + config.Type = value + } else { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType) + } + case printColumnFormat: + if config.Type == "integer" && (value == "int32" || value == "int64") { + config.Format = value + } else if config.Type == "number" && (value == "float" || value == "double") { + config.Format = value + } else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") { + config.Format = value + } else { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat) + } + case printColumnPath: + config.JSONPath = value + case printColumnPri: + i, err := strconv.Atoi(value) + v := int32(i) + if err != nil { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri) + } + config.Priority = v + case printColumnDescr: + config.Description = value + default: + return v1beta1.CustomResourceColumnDefinition{}, 
fmt.Errorf(printColumnError) + } + } + if count != 3 { + return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) + } + return config, nil +} + +// printcolumn requires name,type,JSONPath fields and rest of the field are optional +// +kubebuilder:printcolumn:name=,type=,description=,JSONPath:<.spec.Name>,priority=,format= +func parsePrintColumnParams(t *types.Type) ([]v1beta1.CustomResourceColumnDefinition, error) { + result := []v1beta1.CustomResourceColumnDefinition{} + for _, comment := range t.CommentLines { + if strings.Contains(comment, "+kubebuilder:printcolumn") { + parts := strings.Replace(comment, "+kubebuilder:printcolumn:", "", -1) + res, err := helperPrintColumn(parts, comment) + if err != nil { + return []v1beta1.CustomResourceColumnDefinition{}, err + } + result = append(result, res) + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go new file mode 100644 index 0000000000..e4fda4c7e3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codegen + +import ( + "sort" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/gengo/types" +) + +// APIs is the information of a collection of API +type APIs struct { + // Domain is the domain portion of the group - e.g. k8s.io + Domain string + + // Package is the name of the root API package - e.g. github.com/my-org/my-repo/pkg/apis + Package string + + // Pkg the Package for the root API package + Pkg *types.Package + + // Groups is the list of API groups found under the apis package + Groups map[string]*APIGroup + + Rules []rbacv1.PolicyRule + + Informers map[v1.GroupVersionKind]bool +} + +// GetRules get rules of the APIs +func (apis *APIs) GetRules() []rbacv1.PolicyRule { + rules := []rbacv1.PolicyRule{} + rulesIndex := map[v1.GroupResource]sets.String{} + for _, rule := range apis.Rules { + for _, g := range rule.APIGroups { + for _, r := range rule.Resources { + gr := v1.GroupResource{ + Group: g, + Resource: r, + } + if _, found := rulesIndex[gr]; !found { + rulesIndex[gr] = sets.NewString() + } + rulesIndex[gr].Insert(rule.Verbs...) + } + } + } + for gr, v := range rulesIndex { + verbs := v.List() + sort.Strings(verbs) + rule := rbacv1.PolicyRule{ + Resources: []string{gr.Resource}, + APIGroups: []string{gr.Group}, + Verbs: verbs, + } + rules = append(rules, rule) + } + return rules +} + +// APIGroup contains information of an API group. +type APIGroup struct { + // Package is the name of the go package the api group is under - e.g. github.com/me/apiserver-helloworld/apis + Package string + // Domain is the domain portion of the group - e.g. 
k8s.io + Domain string + // Group is the short name of the group - e.g. mushroomkingdom + Group string + GroupTitle string + // Versions is the list of all versions for this group keyed by name + Versions map[string]*APIVersion + + UnversionedResources map[string]*APIResource + + // Structs is a list of unversioned definitions that must be generated + Structs []*Struct + Pkg *types.Package + PkgPath string +} + +// Struct contains information of a struct. +type Struct struct { + // Name is the name of the type + Name string + // genClient + GenClient bool + GenDeepCopy bool + NonNamespaced bool + + GenUnversioned bool + // Fields is the list of fields appearing in the struct + Fields []*Field +} + +// Field contains information of a field. +type Field struct { + // Name is the name of the field + Name string + // For versioned Kubernetes types, this is the versioned package + VersionedPackage string + // For versioned Kubernetes types, this is the unversioned package + UnversionedImport string + UnversionedType string +} + +// APIVersion contains information of an API version. +type APIVersion struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Resources is a list of resources appearing in the API version keyed by name + Resources map[string]*APIResource + // Pkg is the Package object from code-gen + Pkg *types.Package +} + +// APIResource contains information of an API resource. +type APIResource struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Kind is the resource name - e.g. PeachesCastle + Kind string + // Resource is the resource name - e.g. peachescastles + Resource string + // REST is the rest.Storage implementation used to handle requests + // This field is optional. The standard REST implementation will be used + // by default. + REST string + // Subresources is a map of subresources keyed by name + Subresources map[string]*APISubresource + // Type is the Type object from code-gen + Type *types.Type + // Strategy is name of the struct to use for the strategy + Strategy string + // Strategy is name of the struct to use for the strategy + StatusStrategy string + // NonNamespaced indicates that the resource kind is non namespaced + NonNamespaced bool + + ShortName string + + JSONSchemaProps v1beta1.JSONSchemaProps + CRD v1beta1.CustomResourceDefinition + Validation string + ValidationComments string + // DocAnnotation is a map of annotations by name for doc. e.g. warning, notes message + DocAnnotation map[string]string + // Categories is a list of categories the resource is part of. + Categories []string +} + +// APISubresource contains information of an API subresource. +type APISubresource struct { + // Domain is the group domain - e.g. k8s.io + Domain string + // Group is the group name - e.g. mushroomkingdom + Group string + // Version is the api version - e.g. v1beta1 + Version string + // Kind is the resource name - e.g. PeachesCastle + Kind string + // Resource is the resource name - e.g. peachescastles + Resource string + // Request is the subresource request type - e.g. ScaleCastle + Request string + // REST is the rest.Storage implementation used to handle requests + REST string + // Path is the subresource path - e.g. 
scale + Path string + + // ImportPackage is the import statement that must appear for the Request + ImportPackage string + + // RequestType is the type of the request + RequestType *types.Type + + // RESTType is the type of the request handler + RESTType *types.Type +} + +// Controller contains information of a controller. +type Controller struct { + Target schema.GroupVersionKind + Resource string + Pkg *types.Package + Repo string +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go new file mode 100644 index 0000000000..afa889e36a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package general + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" +) + +// isGoFile filters files from parsing. +func isGoFile(f os.FileInfo) bool { + // ignore non-Go or Go test files + name := f.Name() + return !f.IsDir() && + !strings.HasPrefix(name, ".") && + !strings.HasSuffix(name, "_test.go") && + strings.HasSuffix(name, ".go") +} + +// GetAnnotation extracts the annotation from comment text. +// It will return "foo" for comment "+kubebuilder:webhook:foo" . +func GetAnnotation(c, name string) string { + prefix := fmt.Sprintf("+%s:", name) + if strings.HasPrefix(c, prefix) { + return strings.TrimPrefix(c, prefix) + } + return "" +} + +// ParseKV parses key-value string formatted as "foo=bar" and returns key and value. +func ParseKV(s string) (key, value string, err error) { + kv := strings.Split(s, "=") + if len(kv) != 2 { + err = fmt.Errorf("invalid key value pair") + return key, value, err + } + key, value = kv[0], kv[1] + if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { + value = value[1 : len(value)-1] + } + return key, value, err +} + +// ParseDir parses the Go files under given directory and parses the annotation by +// invoking the parseFn function on each comment group (multi-lines comments). +// TODO(droot): extend it to multiple dirs +func ParseDir(dir string, parseFn func(string) error) error { + fset := token.NewFileSet() + + err := filepath.Walk(dir, + func(path string, info os.FileInfo, _ error) error { + if !isGoFile(info) { + // TODO(droot): enable this output based on verbose flag + // fmt.Println("skipping non-go file", path) + return nil + } + return ParseFile(fset, path, nil, parseFn) + }) + return err +} + +// ParseFile parses given filename or content src and parses annotations by +// invoking the parseFn function on each comment group (multi-lines comments). 
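ParseDir walks the package's Go files and hands every comment group to a callback. A self-contained sketch of the same go/parser plus comment-map flow (the annotation values and type name are hypothetical, and it re-implements the prefix trim instead of importing this internal package):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strings"
)

const src = `package sample

// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
type Reconciler struct{}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "sample.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// The comment map strips comment markers and compresses newlines,
	// which is why the generator matches on bare "+..." prefixes.
	cmap := ast.NewCommentMap(fset, f, f.Comments)
	for _, cg := range cmap.Comments() {
		for _, line := range strings.Split(cg.Text(), "\n") {
			// Equivalent of GetAnnotation(line, "kubebuilder:rbac").
			if rest := strings.TrimPrefix(line, "+kubebuilder:rbac:"); rest != line {
				fmt.Println(rest) // groups=apps,resources=deployments,verbs=get;list;watch
			}
		}
	}
}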
+func ParseFile(fset *token.FileSet, filename string, src interface{}, parseFn func(string) error) error { + f, err := parser.ParseFile(fset, filename, src, parser.ParseComments) + if err != nil { + fmt.Printf("error from parse.ParseFile: %v", err) + return err + } + + // using commentMaps here because it sanitizes the comment text by removing + // comment markers, compresses newlines etc. + cmap := ast.NewCommentMap(fset, f, f.Comments) + + for _, commentGroup := range cmap.Comments() { + err = parseFn(commentGroup.Text()) + if err != nil { + fmt.Print("error when parsing annotation") + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go new file mode 100644 index 0000000000..9649913b38 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "io" + "log" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// FileWriter is a io wrapper to write files +type FileWriter struct { + Fs afero.Fs +} + +// WriteCloser returns a WriteCloser to write to given path +func (fw *FileWriter) WriteCloser(path string) (io.Writer, error) { + if fw.Fs == nil { + fw.Fs = afero.NewOsFs() + } + dir := filepath.Dir(path) + err := fw.Fs.MkdirAll(dir, 0700) + if err != nil { + return nil, err + } + + fi, err := fw.Fs.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return nil, err + } + + return fi, nil +} + +// WriteFile write given content to the file path +func (fw *FileWriter) WriteFile(filePath string, content []byte) error { + if fw.Fs == nil { + fw.Fs = afero.NewOsFs() + } + f, err := fw.WriteCloser(filePath) + if err != nil { + return fmt.Errorf("failed to create %s: %v", filePath, err) + } + + if c, ok := f.(io.Closer); ok { + defer func() { + if err := c.Close(); err != nil { + log.Fatal(err) + } + }() + } + + _, err = f.Write(content) + if err != nil { + return fmt.Errorf("failed to write %s: %v", filePath, err) + } + + return nil +} From 64f85e70b681cc87532a7bdeaacb9c77c2bb83c8 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 2 Apr 2019 15:06:55 +0200 Subject: [PATCH 3/4] Wire crd-schema-gen --- Makefile | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Makefile b/Makefile index 3246b6d6ea..8f8219f322 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,6 @@ +all: build +.PHONY: all + build: hack/build-go.sh .PHONY: build @@ -5,3 +8,16 @@ build: test: go test ./... 
.PHONY: test + +clean: + rm -rf _output/ +.PHONY: clean + +update-codegen-crds: + go run ./vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go --domain openshift.io --apis-dir vendor/github.com/openshift/api --manifests-dir install/ +update-codegen: update-codegen-crds +verify-codegen-crds: + go run ./vendor/github.com/openshift/library-go/cmd/crd-schema-gen/main.go --domain openshift.io --apis-dir vendor/github.com/openshift/api --manifests-dir install/ --verify-only +verify-codegen: verify-codegen-crds +verify: verify-codegen +.PHONY: update-codegen-crds update-codegen verify-codegen-crds verify-codegen verify From 6750bf41fa6b291d29c373e2dae6db3892e08c32 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 19 Apr 2019 16:07:39 +0200 Subject: [PATCH 4/4] Generated --- ...rsion-operator_01_clusteroperator.crd.yaml | 96 ++++++ ...ersion-operator_01_clusterversion.crd.yaml | 274 +++++++++++++++++- 2 files changed, 357 insertions(+), 13 deletions(-) diff --git a/install/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/install/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml index ea4026ca86..839b26554f 100644 --- a/install/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml +++ b/install/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -40,3 +40,99 @@ spec: - name: v1 served: true storage: true + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec hold the intent of how this operator should behave. + type: object + status: + description: status holds the information about the state of an operator. It + is consistent with status information across the kube ecosystem. + properties: + conditions: + description: conditions describes the state of the operator's reconciliation + functionality. +patchMergeKey=type +patchStrategy=merge + items: + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status object. + format: date-time + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. + type: string + reason: + description: reason is the reason for the condition's last transition. Reasons + are CamelCase + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the state of the operator's reconciliation + functionality. + type: string + type: object + type: array + extension: + description: extension contains any additional status information specific + to the operator which owns this status object. + nullable: true + type: object + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" + or related to this operator. Common uses are: 1. 
the detailed resource + driving the operator 2. operator namespaces 3. operand namespaces' + items: + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + type: object + type: array + versions: + description: versions is a slice of operand version tuples. Operators + which manage multiple operands will have multiple entries in the array. If + an operator is Available, it must have at least one entry. You must + report the version of the operator itself with the name "operator". + items: + properties: + name: + description: name is the name of the particular operand this version + is for. It usually matches container images, not operators. + type: string + version: + description: version indicates which version of a particular operand + is currently being manage. It must always match the Available + condition. If 1.0.0 is Available, then this must indicate 1.0.0 + even if the operator is trying to rollout 1.1.0 + type: string + type: object + type: array + type: object + required: + - spec diff --git a/install/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/install/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index 6dbd7da830..bc98920144 100644 --- a/install/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/install/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -1,29 +1,19 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - # name must match the spec fields below, and be in the form: . name: clusterversions.config.openshift.io spec: - # group name to use for REST API: /apis// group: config.openshift.io - # list of versions supported by this CustomResourceDefinition versions: - - name: v1 - # Each version can be enabled/disabled by Served flag. - served: true - # One and only one version must be marked as the storage version. - storage: true - # either Namespaced or Cluster + - name: v1 + served: true + storage: true scope: Cluster subresources: - # enable spec/status status: {} names: - # plural name to be used in the URL: /apis/// plural: clusterversions - # singular name to be used as an alias on the CLI and for display singular: clusterversion - # kind is normally the CamelCased singular type. Your resource manifests use this. kind: ClusterVersion additionalPrinterColumns: - name: Version @@ -41,3 +31,261 @@ spec: - name: Status type: string JSONPath: .status.conditions[?(@.type=="Progressing")].message + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator + will work to ensure that the desired version is applied to the cluster. 
+ properties: + channel: + description: channel is an identifier for explicitly requesting that + a non-default set of updates be applied to this cluster. The default + channel will be contain stable updates that are appropriate for production + clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected + to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: desiredUpdate is an optional field that indicates the desired + value of the cluster version. Setting this value will trigger an upgrade + (if the current version does not match the desired version). The set + of recommended update values is listed as part of available updates + in status, and setting values outside that range may cause the upgrade + to fail. You may specify the version field without setting image if + an update exists with that version in the availableUpdates or history. If + an upgrade fails the operator will halt and report status about the + failing component. Setting the desired update value back to the previous + version will cause a rollback to be attempted. Not all rollbacks will + succeed. + properties: + force: + description: force allows an administrator to update to an image + that has failed verification, does not appear in the availableUpdates + list, or otherwise would be blocked by normal protections on update. + This option should only be used when the authenticity of the provided + image has been verified out of band because the provided image + will run with full administrative access to the cluster. Do not + use this flag with images that comes from unknown or potentially + malicious sources. This flag does not override other forms of + consistency checking that are required before a new update is + deployed. + type: boolean + image: + description: image is a container image location that contains the + update. When this field is part of spec, image is optional if + version is specified and the availableUpdates field contains a + matching version. + type: string + version: + description: version is a semantic versioning identifying the update + version. When this field is part of spec, version is optional + if image is specified. + type: string + type: object + overrides: + description: overrides is list of overides for components that are managed + by cluster version operator. Marking a component unmanaged will prevent + the operator from creating or updating the object. + items: + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind indentifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource + is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should + stop managing the resources in this cluster. Default: false' + type: boolean + type: object + type: array + upstream: + description: upstream may be used to specify the preferred update server. + By default it will use the appropriate update server for the cluster + and region. + type: string + type: object + status: + description: status contains information about the available updates and + any in-progress updates. 
+ properties: + availableUpdates: + description: availableUpdates contains the list of updates that are + appropriate for this cluster. This list may be empty if no updates + are recommended, if the update service is unavailable, or if an invalid + channel has been specified. + items: + properties: + force: + description: force allows an administrator to update to an image + that has failed verification, does not appear in the availableUpdates + list, or otherwise would be blocked by normal protections on + update. This option should only be used when the authenticity + of the provided image has been verified out of band because + the provided image will run with full administrative access + to the cluster. Do not use this flag with images that comes + from unknown or potentially malicious sources. This flag does + not override other forms of consistency checking that are required + before a new update is deployed. + type: boolean + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + version: + description: version is a semantic versioning identifying the + update version. When this field is part of spec, version is + optional if image is specified. + type: string + type: object + nullable: true + type: array + conditions: + description: conditions provides information about the cluster version. + The condition "Available" is set to true if the desiredUpdate has + been reached. The condition "Progressing" is set to true if an update + is being applied. The condition "Degraded" is set to true if an update + is currently blocked by a temporary or permanent error. Conditions + are only valid for the current desiredUpdate when metadata.generation + is equal to status.generation. + items: + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status object. + format: date-time + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. + type: string + reason: + description: reason is the reason for the condition's last transition. Reasons + are CamelCase + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the state of the operator's reconciliation + functionality. + type: string + type: object + type: array + desired: + description: desired is the version that the cluster is reconciling + towards. If the cluster is not yet fully initialized desired will + be set with the information available, which may be an image or a + tag. + properties: + force: + description: force allows an administrator to update to an image + that has failed verification, does not appear in the availableUpdates + list, or otherwise would be blocked by normal protections on update. + This option should only be used when the authenticity of the provided + image has been verified out of band because the provided image + will run with full administrative access to the cluster. Do not + use this flag with images that comes from unknown or potentially + malicious sources. This flag does not override other forms of + consistency checking that are required before a new update is + deployed. + type: boolean + image: + description: image is a container image location that contains the + update. 
When this field is part of spec, image is optional if + version is specified and the availableUpdates field contains a + matching version. + type: string + version: + description: version is a semantic versioning identifying the update + version. When this field is part of spec, version is optional + if image is specified. + type: string + type: object + history: + description: history contains a list of the most recent versions applied + to the cluster. This value may be empty during cluster startup, and + then will be updated when a new update is being applied. The newest + update is first in the list and it is ordered by recency. Updates + in the history have state Completed if the rollout completed - if + an update was failing or halfway applied the state will be Partial. + Only a limited amount of update history is preserved. + items: + properties: + completionTime: + description: completionTime, if set, is when the update was fully + applied. The update that is currently being applied will have + a null completion time. Completion time will always be set for + entries that are not the current update (usually to the started + time of the next update). + format: date-time + nullable: true + type: string + image: + description: image is a container image location that contains + the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + format: date-time + type: string + state: + description: state reflects whether the update was fully applied. + The Partial state indicates the update is not fully applied, + while the Completed state indicates the update was successfully + rolled out at least once (all parts of the update successfully + applied). + type: string + verified: + description: verified indicates whether the provided update was + properly verified before it was installed. If this is false + the cluster may not be trusted. + type: boolean + version: + description: version is a semantic versioning identifying the + update version. If the requested image does not define a version, + or if a failure occurs retrieving the image, this value may + be empty. + type: string + type: object + type: array + observedGeneration: + description: observedGeneration reports which version of the spec is + being synced. If this value is not equal to metadata.generation, then + the desired and conditions fields may represent from a previous version. + format: int64 + type: integer + versionHash: + description: versionHash is a fingerprint of the content that the cluster + will be updated with. It is used by the operator to avoid unnecessary + work and is for internal use only. + type: string + type: object + required: + - spec
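For reference on how the schemas above get their field-level entries: the description text comes from each field's doc comment (parseDescription strips the marker lines), and the nullable: true entries seen on fields such as completionTime presumably come from a +nullable field tag, which the forked controller-tools understands. A minimal sketch extending the hypothetical Widget type from earlier:

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type WidgetStatus struct {
	// completionTime, if set, is when the last rollout finished. The marker
	// lines below do not appear in the generated description.
	// +nullable
	// +optional
	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
}

With the Makefile wiring above, make update-codegen-crds regenerates the manifests under install/, and make verify-codegen-crds (also reachable through make verify) re-runs the generator with --verify-only so drift in the generated CRDs is caught without rewriting them.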