diff --git a/Makefile b/Makefile
index a7b483ed5e19..65ab61855fca 100644
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,7 @@ build-network:
 .PHONY: build-network
 
 build-extended-test:
-	hack/build-go.sh test/extended/extended.test
+	hack/build-go.sh cmd/openshift-tests
 .PHONY: build-extended-test
 
 build-integration-test: build-router-e2e-test
diff --git a/cmd/openshift-tests/e2e.go b/cmd/openshift-tests/e2e.go
new file mode 100644
index 000000000000..61a2f5af9036
--- /dev/null
+++ b/cmd/openshift-tests/e2e.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+	"strings"
+	"time"
+
+	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
+
+	"github.com/openshift/origin/pkg/test/ginkgo"
+
+	_ "github.com/openshift/origin/test/extended"
+)
+
+// staticSuites are all known test suites this binary should run
+var staticSuites = []*ginkgo.TestSuite{
+	{
+		Name: "openshift/conformance",
+		Description: templates.LongDesc(`
+		Tests that ensure an OpenShift cluster and components are working properly.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Suite:openshift/conformance/")
+		},
+		Parallelism: 30,
+	},
+	{
+		Name: "openshift/conformance/parallel",
+		Description: templates.LongDesc(`
+		Only the portion of the openshift/conformance test suite that runs in parallel.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Suite:openshift/conformance/parallel")
+		},
+		Parallelism: 30,
+	},
+	{
+		Name: "openshift/conformance/serial",
+		Description: templates.LongDesc(`
+		Only the portion of the openshift/conformance test suite that runs serially.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Suite:openshift/conformance/serial")
+		},
+	},
+	{
+		Name: "kubernetes/conformance",
+		Description: templates.LongDesc(`
+		The default Kubernetes conformance suite.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Suite:k8s]") && strings.Contains(name, "[Conformance]")
+		},
+		Parallelism: 30,
+	},
+	{
+		Name: "openshift/build",
+		Description: templates.LongDesc(`
+		Tests that exercise the OpenShift build functionality.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Feature:Builds]")
+		},
+		Parallelism: 7,
+		// Jenkins tests can take upwards of 40 minutes
+		TestTimeout: 45 * time.Minute,
+	},
+	{
+		Name: "openshift/image-registry",
+		Description: templates.LongDesc(`
+		Tests that exercise the OpenShift image-registry functionality.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[registry]") && !strings.Contains(name, "[Local]")
+		},
+	},
+	{
+		Name: "openshift/image-ecosystem",
+		Description: templates.LongDesc(`
+		Tests that exercise language and tooling images shipped as part of OpenShift.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[image_ecosystem]") && !strings.Contains(name, "[Local]")
+		},
+		Parallelism: 7,
+		TestTimeout: 20 * time.Minute,
+	},
+	{
+		Name: "openshift/smoke-4",
+		Description: templates.LongDesc(`
+		Tests that verify a 4.X cluster (using the new operator-based core) is ready. This
+		suite will be removed in favor of openshift/conformance once all functionality is
+		available.
+		`),
+		Matches: func(name string) bool {
+			return strings.Contains(name, "[Suite:openshift/smoke-4]")
+		},
+		Parallelism: 10,
+	},
+	{
+		Name: "openshift/all",
+		Description: templates.LongDesc(`
+		Run all tests.
+		`),
+		Matches: func(name string) bool { return true },
+	},
+	{
+		Name: "kubernetes/all",
+		Description: templates.LongDesc(`
+		Run all Kubernetes tests.
+		`),
+		Matches: func(name string) bool { return strings.Contains(name, "[k8s.io]") },
+	},
+}
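The suites above do not register any tests themselves; the blank import of test/extended pulls in the registered ginkgo tests, and each suite simply selects from them by substring-matching the bracketed tags in a test's name, with Parallelism and TestTimeout as per-suite tuning knobs. A minimal, self-contained sketch of that selection idea, using hypothetical test names (nothing below is part of this patch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical ginkgo test names; real names carry tags such as
	// "[Suite:openshift/conformance/parallel]" that the Matches funcs above look for.
	names := []string{
		"[Feature:Builds] started build completes [Suite:openshift/conformance/parallel]",
		"[registry] registry serves pushed images [Suite:openshift/conformance/serial]",
		"[k8s.io] Pods should be restarted with a liveness probe [Suite:k8s] [Conformance]",
	}
	// A suite is effectively just a predicate over the test name.
	parallel := func(name string) bool {
		return strings.Contains(name, "[Suite:openshift/conformance/parallel")
	}
	for _, name := range names {
		if parallel(name) {
			fmt.Println(name)
		}
	}
}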
diff --git a/cmd/openshift-tests/openshift-tests.go b/cmd/openshift-tests/openshift-tests.go
new file mode 100644
index 000000000000..b56b59d21e21
--- /dev/null
+++ b/cmd/openshift-tests/openshift-tests.go
@@ -0,0 +1,175 @@
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/onsi/gomega"
+
+	"github.com/golang/glog"
+	"github.com/onsi/ginkgo"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"k8s.io/apiserver/pkg/util/logs"
+	"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+
+	"github.com/openshift/origin/pkg/cmd/flagtypes"
+	testginkgo "github.com/openshift/origin/pkg/test/ginkgo"
+	exutil "github.com/openshift/origin/test/extended/util"
+)
+
+func main() {
+	logs.InitLogs()
+	defer logs.FlushLogs()
+
+	rand.Seed(time.Now().UTC().UnixNano())
+
+	root := &cobra.Command{
+		Long: templates.LongDesc(`
+		OpenShift Tests
+
+		This command verifies behavior of an OpenShift cluster by running remote tests against
+		the cluster API that exercise functionality. In general these tests may be disruptive
+		or require elevated privileges - see the descriptions of each test suite.
+		`),
+	}
+	flagtypes.GLog(root.PersistentFlags())
+
+	suites := staticSuites
+
+	suiteOpt := &testginkgo.Options{
+		DetectFlakes: 6,
+		Suites:       suites,
+	}
+	cmd := &cobra.Command{
+		Use:   "run SUITE",
+		Short: "Run a test suite",
+		Long: templates.LongDesc(`
+		Run a test suite against an OpenShift server
+
+		This command will run one of the following suites against a cluster identified by the current
+		KUBECONFIG file. See the suite description for more on what actions the suite will take.
+
+		If you specify the --dry-run argument, the names of each individual test that is part of the
+		suite will be printed, one per line. You may filter this list and pass it back to the run
+		command with the --file argument. You may also pipe a list of test names, one per line, on
+		standard input by passing "-f -".
+
+		`) + testginkgo.SuitesString(suites, "\n\nAvailable test suites:\n\n"),
+
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			var exitErr error
+			var out, errOut io.Writer = os.Stdout, os.Stderr
+			if len(suiteOpt.OutFile) > 0 {
+				f, err := os.OpenFile(suiteOpt.OutFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0640)
+				if err != nil {
+					return err
+				}
+				defer func() {
+					if exitErr != nil {
+						fmt.Fprintf(f, "error: %s", exitErr)
+					}
+					if err := f.Close(); err != nil {
+						fmt.Fprintf(os.Stderr, "error: Unable to close output file\n")
+					}
+				}()
+				out = io.MultiWriter(out, f)
+				errOut = io.MultiWriter(errOut, f)
+			}
+			suiteOpt.Out, suiteOpt.ErrOut = out, errOut
+
+			if exitErr = initProvider(suiteOpt.Provider); exitErr != nil {
+				return exitErr
+			}
+			os.Setenv("TEST_PROVIDER", suiteOpt.Provider)
+			exitErr = suiteOpt.Run(args)
+			return exitErr
+		},
+	}
+	cmd.Flags().BoolVar(&suiteOpt.DryRun, "dry-run", suiteOpt.DryRun, "Print the tests to run without executing them.")
+	cmd.Flags().StringVar(&suiteOpt.JUnitDir, "junit-dir", suiteOpt.JUnitDir, "The directory to write test reports to.")
+	cmd.Flags().StringVar(&suiteOpt.Provider, "provider", suiteOpt.Provider, "The cluster infrastructure provider. Will automatically default to the correct value.")
+	cmd.Flags().StringVarP(&suiteOpt.TestFile, "file", "f", suiteOpt.TestFile, "Create a suite from the newline-delimited test names in this file.")
+	cmd.Flags().StringVarP(&suiteOpt.OutFile, "output-file", "o", suiteOpt.OutFile, "Write all test output to this file.")
+	cmd.Flags().DurationVar(&suiteOpt.Timeout, "timeout", suiteOpt.Timeout, "Set the maximum time a test can run before being aborted. This is read from the suite by default, but will be 10 minutes otherwise.")
+	root.AddCommand(cmd)
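These flags are the whole surface of the suite runner: --dry-run prints the selected test names, a filtered list can be fed back with --file/-f, and --output-file tees everything written to Out/ErrOut into a file. For development the same Options struct can also be driven in-process; a rough sketch using only the fields wired up above (this fragment assumes it sits alongside the code in this package, and the suite name is just an example):

	// Roughly the in-process equivalent of
	// `openshift-tests run openshift/conformance/parallel --dry-run`:
	// list the matching test names without executing them.
	opt := &testginkgo.Options{
		Suites: staticSuites,
		DryRun: true,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	}
	if err := opt.Run([]string{"openshift/conformance/parallel"}); err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
	}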
+
+	testOpt := &testginkgo.TestOptions{}
+	cmd = &cobra.Command{
+		Use:   "run-test NAME",
+		Short: "Run a single test by name",
+		Long: templates.LongDesc(`
+		Execute a single test
+
+		This executes a single test by name. It is used by the run command during suite execution but may also
+		be used to run a test in isolation while developing new tests.
+		`),
+
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := initProvider(os.Getenv("TEST_PROVIDER")); err != nil {
+				return err
+			}
+			return testOpt.Run(args)
+		},
+	}
+	cmd.Flags().BoolVar(&testOpt.DryRun, "dry-run", testOpt.DryRun, "Print the test to run without executing it.")
+	root.AddCommand(cmd)
+
+	pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError)
+	flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError)
+	exutil.InitStandardFlags()
+
+	if err := root.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+
+func initProvider(provider string) error {
+	// record the exit error to the output file
+	if err := decodeProviderTo(provider, exutil.TestContext); err != nil {
+		return err
+	}
+	exutil.TestContext.AllowedNotReadyNodes = 100
+
+	exutil.AnnotateTestSuite()
+	exutil.InitTest()
+	gomega.RegisterFailHandler(ginkgo.Fail)
+
+	// TODO: infer SSH keys from the cluster
+	return nil
+}
+
+func decodeProviderTo(provider string, testContext *e2e.TestContextType) error {
+	switch provider {
+	case "":
+		if _, ok := os.LookupEnv("KUBE_SSH_USER"); ok {
+			if _, ok := os.LookupEnv("LOCAL_SSH_KEY"); ok {
+				testContext.Provider = "local"
+			}
+		}
+		// TODO: detect which provider the cluster is running and use that as a default.
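	// Example (hypothetical values, not part of this patch): the non-empty branch below
	// expects --provider to carry a JSON object, so an invocation would look roughly like
	//
	//	openshift-tests run openshift/conformance --provider '{"type":"aws"}'
	//
	// where "aws" becomes testContext.Provider and the same object is also decoded into
	// testContext.CloudConfig.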
+ default: + var providerInfo struct{ Type string } + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.Type) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + testContext.Provider = providerInfo.Type + if err := json.Unmarshal([]byte(provider), &testContext.CloudConfig); err != nil { + return fmt.Errorf("provider must decode into the cloud config object: %v", err) + } + } + glog.V(2).Infof("Provider %s: %#v", testContext.Provider, testContext.CloudConfig) + return nil +} diff --git a/glide.lock b/glide.lock index 676ff217aaf1..fe6aa70e0e7e 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: f2456ad942294612c1b98b764126de079fcaf3915fdc91653f94d73e66baaae4 -updated: 2018-11-05T13:31:13.552981925-05:00 +updated: 2018-11-10T16:03:45.102647622-05:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -742,7 +742,7 @@ imports: - name: github.com/NYTimes/gziphandler version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb - name: github.com/onsi/ginkgo - version: 6e93088a6ab736ec6a12d10ef99962f8d9e435ee + version: 2adc71ecca6a3b0d54214b3c1ea1c2cfc11e970a repo: https://github.com/openshift/onsi-ginkgo.git subpackages: - config @@ -815,7 +815,7 @@ imports: - go-selinux - go-selinux/label - name: github.com/openshift/api - version: 22c4ad19c5f2de1ffe266456e0a31e8fd1069f44 + version: c3b47bfe89cdc8f9eeba184e9ab130518893e471 subpackages: - apps - apps/v1 @@ -860,7 +860,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: 8641daf5752d24858c43f5f28feb2abc0afbfb2c + version: 94890c75c8081c1f818f137be55d0720758c4638 subpackages: - apps/clientset/versioned - apps/clientset/versioned/fake @@ -1607,7 +1607,7 @@ imports: - plugin/pkg/authenticator/token/webhook - plugin/pkg/authorizer/webhook - name: k8s.io/client-go - version: 427190a5c8fb757089de86d5081f2e058408d217 + version: b28cdde46f9c296b8ecde9c0bed0d215b6cda59d repo: https://github.com/openshift/kubernetes-client-go.git subpackages: - discovery @@ -1849,7 +1849,7 @@ imports: - pkg/util/proto/testing - pkg/util/proto/validation - name: k8s.io/kubernetes - version: b39448a71a2733b79ea5458aa28a823700f82e44 + version: 22b8c511baaf712194dee3eb9c6d8e96d282d3a5 repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/controller-manager/app @@ -2649,6 +2649,9 @@ imports: - test/e2e/storage/vsphere - test/images/net/common - test/images/net/nat + - test/integration + - test/integration/etcd + - test/integration/framework - test/utils - test/utils/image - third_party/forked/golang/expansion @@ -2674,7 +2677,7 @@ imports: - pkg/client/custom_metrics - pkg/client/external_metrics - name: k8s.io/sample-apiserver - version: 7f1db76f88256f6e74acfc5253bb279b3eb15b91 + version: cb8e07308edfc551f1b83405c19ede45e6008a4d subpackages: - pkg/apis/wardle - pkg/apis/wardle/v1alpha1 diff --git a/hack/build-cross.sh b/hack/build-cross.sh index d7ed2bcd8f94..78b15bdce398 100755 --- a/hack/build-cross.sh +++ b/hack/build-cross.sh @@ -75,10 +75,6 @@ os::build::build_binaries "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]-}" OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") os::build::build_binaries "${OS_CROSS_COMPILE_TARGETS[@]}" -# Build the test binaries for the host platform -OS_BUILD_PLATFORMS=("${test_platforms[@]+"${test_platforms[@]}"}") -os::build::build_binaries "${OS_TEST_TARGETS[@]}" - if [[ 
"${OS_BUILD_RELEASE_ARCHIVES-}" != "n" ]]; then # Make the primary client/server release. OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") diff --git a/hack/lib/constants.sh b/hack/lib/constants.sh index be7ded9d3bf1..835928c9144b 100755 --- a/hack/lib/constants.sh +++ b/hack/lib/constants.sh @@ -38,6 +38,7 @@ readonly OS_IMAGE_COMPILE_TARGETS_LINUX=( cmd/template-service-broker cmd/openshift-node-config cmd/openshift-sdn + cmd/openshift-tests cmd/openshift vendor/k8s.io/kubernetes/cmd/hyperkube ) @@ -51,10 +52,6 @@ readonly OS_CROSS_COMPILE_TARGETS=( ) readonly OS_CROSS_COMPILE_BINARIES=("${OS_CROSS_COMPILE_TARGETS[@]##*/}") -readonly OS_TEST_TARGETS=( - test/extended/extended.test -) - readonly OS_GOVET_BLACKLIST=( ) diff --git a/origin.spec b/origin.spec index 8fce878b4661..cd5b755c4ea8 100644 --- a/origin.spec +++ b/origin.spec @@ -225,7 +225,6 @@ of docker. Exclude those versions of docker. %if 0%{make_redistributable} # Create Binaries for all supported arches %{os_git_vars} OS_BUILD_RELEASE_ARCHIVES=n make build-cross -%{os_git_vars} OS_BUILD_RELEASE_ARCHIVES=n make build WHAT=vendor/github.com/onsi/ginkgo/ginkgo %else # Create Binaries only for building arch %ifarch x86_64 @@ -244,7 +243,6 @@ of docker. Exclude those versions of docker. BUILD_PLATFORM="linux/s390x" %endif OS_ONLY_BUILD_PLATFORMS="${BUILD_PLATFORM}" %{os_git_vars} OS_BUILD_RELEASE_ARCHIVES=n make build-cross -OS_ONLY_BUILD_PLATFORMS="${BUILD_PLATFORM}" %{os_git_vars} OS_BUILD_RELEASE_ARCHIVES=n make build WHAT=vendor/github.com/onsi/ginkgo/ginkgo %endif # Generate man pages @@ -257,17 +255,12 @@ PLATFORM="$(go env GOHOSTOS)/$(go env GOHOSTARCH)" install -d %{buildroot}%{_bindir} # Install linux components -for bin in oc openshift hypershift hyperkube template-service-broker openshift-node-config openshift-sdn +for bin in oc openshift hypershift hyperkube template-service-broker openshift-node-config openshift-sdn openshift-tests do echo "+++ INSTALLING ${bin}" install -p -m 755 _output/local/bin/${PLATFORM}/${bin} %{buildroot}%{_bindir}/${bin} done -# Install tests -install -d %{buildroot}%{_libexecdir}/%{name} -install -p -m 755 _output/local/bin/${PLATFORM}/extended.test %{buildroot}%{_libexecdir}/%{name}/ -install -p -m 755 _output/local/bin/${PLATFORM}/ginkgo %{buildroot}%{_libexecdir}/%{name}/ - %if 0%{?make_redistributable} # Install client executable for windows and mac install -d %{buildroot}%{_datadir}/%{name}/{linux,macosx,windows} @@ -369,8 +362,7 @@ touch --reference=%{SOURCE0} $RPM_BUILD_ROOT/usr/sbin/%{name}-docker-excluder %ghost %config(noreplace) %{_sysconfdir}/origin/.config_managed %files tests -%{_libexecdir}/%{name} -%{_libexecdir}/%{name}/extended.test +%{_bindir}/openshift-tests %files hypershift %{_bindir}/hypershift diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index fb0bd057b714..a76bb6976d7c 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -127,6 +127,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/build/v1.WebHookTrigger": schema_openshift_api_build_v1_WebHookTrigger(ref), "github.com/openshift/api/config/v1.AdmissionPluginConfig": schema_openshift_api_config_v1_AdmissionPluginConfig(ref), "github.com/openshift/api/config/v1.AuditConfig": schema_openshift_api_config_v1_AuditConfig(ref), + "github.com/openshift/api/config/v1.Authentication": schema_openshift_api_config_v1_Authentication(ref), + 
"github.com/openshift/api/config/v1.AuthenticationList": schema_openshift_api_config_v1_AuthenticationList(ref), + "github.com/openshift/api/config/v1.AuthenticationSpec": schema_openshift_api_config_v1_AuthenticationSpec(ref), + "github.com/openshift/api/config/v1.AuthenticationStatus": schema_openshift_api_config_v1_AuthenticationStatus(ref), "github.com/openshift/api/config/v1.Build": schema_openshift_api_config_v1_Build(ref), "github.com/openshift/api/config/v1.BuildDefaults": schema_openshift_api_config_v1_BuildDefaults(ref), "github.com/openshift/api/config/v1.BuildList": schema_openshift_api_config_v1_BuildList(ref), @@ -135,20 +139,56 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/config/v1.CertInfo": schema_openshift_api_config_v1_CertInfo(ref), "github.com/openshift/api/config/v1.ClientConnectionOverrides": schema_openshift_api_config_v1_ClientConnectionOverrides(ref), "github.com/openshift/api/config/v1.ConfigMapReference": schema_openshift_api_config_v1_ConfigMapReference(ref), + "github.com/openshift/api/config/v1.Console": schema_openshift_api_config_v1_Console(ref), + "github.com/openshift/api/config/v1.ConsoleList": schema_openshift_api_config_v1_ConsoleList(ref), + "github.com/openshift/api/config/v1.ConsoleSpec": schema_openshift_api_config_v1_ConsoleSpec(ref), + "github.com/openshift/api/config/v1.ConsoleStatus": schema_openshift_api_config_v1_ConsoleStatus(ref), + "github.com/openshift/api/config/v1.DNS": schema_openshift_api_config_v1_DNS(ref), + "github.com/openshift/api/config/v1.DNSList": schema_openshift_api_config_v1_DNSList(ref), + "github.com/openshift/api/config/v1.DNSSpec": schema_openshift_api_config_v1_DNSSpec(ref), + "github.com/openshift/api/config/v1.DNSStatus": schema_openshift_api_config_v1_DNSStatus(ref), "github.com/openshift/api/config/v1.EtcdConnectionInfo": schema_openshift_api_config_v1_EtcdConnectionInfo(ref), "github.com/openshift/api/config/v1.EtcdStorageConfig": schema_openshift_api_config_v1_EtcdStorageConfig(ref), "github.com/openshift/api/config/v1.GenericAPIServerConfig": schema_openshift_api_config_v1_GenericAPIServerConfig(ref), "github.com/openshift/api/config/v1.HTTPServingInfo": schema_openshift_api_config_v1_HTTPServingInfo(ref), + "github.com/openshift/api/config/v1.IdentityProvider": schema_openshift_api_config_v1_IdentityProvider(ref), + "github.com/openshift/api/config/v1.IdentityProviderList": schema_openshift_api_config_v1_IdentityProviderList(ref), + "github.com/openshift/api/config/v1.IdentityProviderSpec": schema_openshift_api_config_v1_IdentityProviderSpec(ref), + "github.com/openshift/api/config/v1.IdentityProviderStatus": schema_openshift_api_config_v1_IdentityProviderStatus(ref), "github.com/openshift/api/config/v1.Image": schema_openshift_api_config_v1_Image(ref), "github.com/openshift/api/config/v1.ImageLabel": schema_openshift_api_config_v1_ImageLabel(ref), "github.com/openshift/api/config/v1.ImageList": schema_openshift_api_config_v1_ImageList(ref), "github.com/openshift/api/config/v1.ImageSpec": schema_openshift_api_config_v1_ImageSpec(ref), "github.com/openshift/api/config/v1.ImageStatus": schema_openshift_api_config_v1_ImageStatus(ref), + "github.com/openshift/api/config/v1.Infrastructure": schema_openshift_api_config_v1_Infrastructure(ref), + "github.com/openshift/api/config/v1.InfrastructureList": schema_openshift_api_config_v1_InfrastructureList(ref), + "github.com/openshift/api/config/v1.InfrastructureSpec": 
schema_openshift_api_config_v1_InfrastructureSpec(ref), + "github.com/openshift/api/config/v1.InfrastructureStatus": schema_openshift_api_config_v1_InfrastructureStatus(ref), + "github.com/openshift/api/config/v1.Ingress": schema_openshift_api_config_v1_Ingress(ref), + "github.com/openshift/api/config/v1.IngressList": schema_openshift_api_config_v1_IngressList(ref), + "github.com/openshift/api/config/v1.IngressSpec": schema_openshift_api_config_v1_IngressSpec(ref), + "github.com/openshift/api/config/v1.IngressStatus": schema_openshift_api_config_v1_IngressStatus(ref), "github.com/openshift/api/config/v1.KubeClientConfig": schema_openshift_api_config_v1_KubeClientConfig(ref), "github.com/openshift/api/config/v1.LeaderElection": schema_openshift_api_config_v1_LeaderElection(ref), "github.com/openshift/api/config/v1.NamedCertificate": schema_openshift_api_config_v1_NamedCertificate(ref), + "github.com/openshift/api/config/v1.Network": schema_openshift_api_config_v1_Network(ref), + "github.com/openshift/api/config/v1.NetworkList": schema_openshift_api_config_v1_NetworkList(ref), + "github.com/openshift/api/config/v1.NetworkSpec": schema_openshift_api_config_v1_NetworkSpec(ref), + "github.com/openshift/api/config/v1.NetworkStatus": schema_openshift_api_config_v1_NetworkStatus(ref), + "github.com/openshift/api/config/v1.OAuth": schema_openshift_api_config_v1_OAuth(ref), + "github.com/openshift/api/config/v1.OAuthList": schema_openshift_api_config_v1_OAuthList(ref), + "github.com/openshift/api/config/v1.OAuthSpec": schema_openshift_api_config_v1_OAuthSpec(ref), + "github.com/openshift/api/config/v1.OAuthStatus": schema_openshift_api_config_v1_OAuthStatus(ref), + "github.com/openshift/api/config/v1.Project": schema_openshift_api_config_v1_Project(ref), + "github.com/openshift/api/config/v1.ProjectList": schema_openshift_api_config_v1_ProjectList(ref), + "github.com/openshift/api/config/v1.ProjectSpec": schema_openshift_api_config_v1_ProjectSpec(ref), + "github.com/openshift/api/config/v1.ProjectStatus": schema_openshift_api_config_v1_ProjectStatus(ref), "github.com/openshift/api/config/v1.RegistryLocation": schema_openshift_api_config_v1_RegistryLocation(ref), "github.com/openshift/api/config/v1.RemoteConnectionInfo": schema_openshift_api_config_v1_RemoteConnectionInfo(ref), + "github.com/openshift/api/config/v1.Scheduling": schema_openshift_api_config_v1_Scheduling(ref), + "github.com/openshift/api/config/v1.SchedulingList": schema_openshift_api_config_v1_SchedulingList(ref), + "github.com/openshift/api/config/v1.SchedulingSpec": schema_openshift_api_config_v1_SchedulingSpec(ref), + "github.com/openshift/api/config/v1.SchedulingStatus": schema_openshift_api_config_v1_SchedulingStatus(ref), "github.com/openshift/api/config/v1.ServingInfo": schema_openshift_api_config_v1_ServingInfo(ref), "github.com/openshift/api/config/v1.StringSource": schema_openshift_api_config_v1_StringSource(ref), "github.com/openshift/api/config/v1.StringSourceSpec": schema_openshift_api_config_v1_StringSourceSpec(ref), @@ -6716,6 +6756,121 @@ func schema_openshift_api_config_v1_AuditConfig(ref common.ReferenceCallback) co } } +func schema_openshift_api_config_v1_Authentication(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Authentication holds cluster-wide information about Authentication. 
The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.AuthenticationSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.AuthenticationStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.AuthenticationSpec", "github.com/openshift/api/config/v1.AuthenticationStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_AuthenticationList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Authentication"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Authentication", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_AuthenticationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_AuthenticationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + func schema_openshift_api_config_v1_Build(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -7045,162 +7200,392 @@ func schema_openshift_api_config_v1_ConfigMapReference(ref common.ReferenceCallb } } -func schema_openshift_api_config_v1_EtcdConnectionInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_Console(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + Description: "Console holds cluster-wide information about Console. The canonical name is `cluster`", Properties: map[string]spec.Schema{ - "urls": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "URLs are the URLs for etcd", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, }, - "ca": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "CA is a file containing trusted roots for the etcd server certificates", + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "certFile": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "CertFile is a file containing a PEM-encoded certificate", - Type: []string{"string"}, - Format: "", + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "keyFile": { + "spec": { SchemaProps: spec.SchemaProps{ - Description: "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", - Type: []string{"string"}, - Format: "", + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.ConsoleSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.ConsoleStatus"), }, }, }, - Required: []string{"urls", "ca", "certFile", "keyFile"}, + Required: []string{"spec", "status"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "github.com/openshift/api/config/v1.ConsoleSpec", "github.com/openshift/api/config/v1.ConsoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_openshift_api_config_v1_EtcdStorageConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_ConsoleList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Properties: map[string]spec.Schema{ - "urls": { - SchemaProps: spec.SchemaProps{ - Description: "URLs are the URLs for etcd", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "ca": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "CA is a file containing trusted roots for the etcd server certificates", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "certFile": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "CertFile is a file containing a PEM-encoded certificate", + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "keyFile": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", - Type: []string{"string"}, - Format: "", + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, - "storagePrefix": { + "items": { SchemaProps: spec.SchemaProps{ - Description: "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. 
This value, if changed, will mean existing objects in etcd will no longer be located.", - Type: []string{"string"}, - Format: "", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Console"), + }, + }, + }, }, }, }, - Required: []string{"urls", "ca", "certFile", "keyFile", "storagePrefix"}, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Console", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_ConsoleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, }, }, Dependencies: []string{}, } } -func schema_openshift_api_config_v1_GenericAPIServerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_ConsoleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_DNS(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster`", Properties: map[string]spec.Schema{ - "servingInfo": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "ServingInfo describes how to start serving", - Ref: ref("github.com/openshift/api/config/v1.HTTPServingInfo"), + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, }, - "corsAllowedOrigins": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "CORSAllowedOrigins", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "auditConfig": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "AuditConfig describes how to configure audit information", - Ref: ref("github.com/openshift/api/config/v1.AuditConfig"), + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "storageConfig": { + "spec": { SchemaProps: spec.SchemaProps{ - Description: "StorageConfig contains information about how to use", - Ref: ref("github.com/openshift/api/config/v1.EtcdStorageConfig"), + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.DNSSpec"), }, }, - "admissionPluginConfig": { + "status": { SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/config/v1.AdmissionPluginConfig"), - }, - }, - }, + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.DNSStatus"), }, }, - "kubeClientConfig": { - SchemaProps: spec.SchemaProps{ + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.DNSSpec", "github.com/openshift/api/config/v1.DNSStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_DNSList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.DNS"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.DNS", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_DNSSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_DNSStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_EtcdConnectionInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + Properties: map[string]spec.Schema{ + "urls": { + SchemaProps: spec.SchemaProps{ + Description: "URLs are the URLs for etcd", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "ca": { + SchemaProps: spec.SchemaProps{ + Description: "CA is a file containing trusted roots for the etcd server certificates", + Type: []string{"string"}, + Format: "", + }, + }, + "certFile": { + SchemaProps: spec.SchemaProps{ + Description: "CertFile is a file containing a PEM-encoded certificate", + Type: []string{"string"}, + Format: "", + }, + }, + "keyFile": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"urls", "ca", "certFile", "keyFile"}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_EtcdStorageConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "urls": { + SchemaProps: spec.SchemaProps{ + Description: "URLs are the URLs for etcd", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "ca": { + SchemaProps: spec.SchemaProps{ + Description: "CA is a file containing trusted roots for the etcd server certificates", + Type: []string{"string"}, + Format: "", + }, + }, + "certFile": { + SchemaProps: spec.SchemaProps{ + Description: "CertFile is a file containing a PEM-encoded certificate", + Type: []string{"string"}, + Format: "", + }, + }, + "keyFile": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFile is a file containing a 
PEM-encoded private key for the certificate specified by CertFile", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePrefix": { + SchemaProps: spec.SchemaProps{ + Description: "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"urls", "ca", "certFile", "keyFile", "storagePrefix"}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_GenericAPIServerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + Properties: map[string]spec.Schema{ + "servingInfo": { + SchemaProps: spec.SchemaProps{ + Description: "ServingInfo describes how to start serving", + Ref: ref("github.com/openshift/api/config/v1.HTTPServingInfo"), + }, + }, + "corsAllowedOrigins": { + SchemaProps: spec.SchemaProps{ + Description: "CORSAllowedOrigins", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "auditConfig": { + SchemaProps: spec.SchemaProps{ + Description: "AuditConfig describes how to configure audit information", + Ref: ref("github.com/openshift/api/config/v1.AuditConfig"), + }, + }, + "storageConfig": { + SchemaProps: spec.SchemaProps{ + Description: "StorageConfig contains information about how to use", + Ref: ref("github.com/openshift/api/config/v1.EtcdStorageConfig"), + }, + }, + "admissionPluginConfig": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.AdmissionPluginConfig"), + }, + }, + }, + }, + }, + "kubeClientConfig": { + SchemaProps: spec.SchemaProps{ Ref: ref("github.com/openshift/api/config/v1.KubeClientConfig"), }, }, @@ -7306,16 +7691,891 @@ func schema_openshift_api_config_v1_HTTPServingInfo(ref common.ReferenceCallback Required: []string{"bindAddress", "bindNetwork", "certFile", "keyFile", "clientCA", "namedCertificates", "maxRequestsInFlight", "requestTimeoutSeconds"}, }, }, - Dependencies: []string{ - "github.com/openshift/api/config/v1.NamedCertificate"}, + Dependencies: []string{ + "github.com/openshift/api/config/v1.NamedCertificate"}, + } +} + +func schema_openshift_api_config_v1_IdentityProvider(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "IdentityProvider holds cluster-wide information about IdentityProvider. The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.IdentityProviderSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.IdentityProviderStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.IdentityProviderSpec", "github.com/openshift/api/config/v1.IdentityProviderStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_IdentityProviderList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.IdentityProvider"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.IdentityProvider", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_IdentityProviderSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_IdentityProviderStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_Image(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.ImageSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. 
They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.ImageStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.ImageSpec", "github.com/openshift/api/config/v1.ImageStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_ImageLabel(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name defines the name of the label. It must have non-zero length.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value defines the literal value of the label.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_ImageList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Image"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Image", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_ImageSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "allowedRegistriesForImport": { + SchemaProps: spec.SchemaProps{ + Description: "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. 
Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.RegistryLocation"), + }, + }, + }, + }, + }, + "externalRegistryHostname": { + SchemaProps: spec.SchemaProps{ + Description: "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + Type: []string{"string"}, + Format: "", + }, + }, + "additionalTrustedCA": { + SchemaProps: spec.SchemaProps{ + Description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import.", + Ref: ref("github.com/openshift/api/config/v1.ConfigMapReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.ConfigMapReference", "github.com/openshift/api/config/v1.RegistryLocation"}, + } +} + +func schema_openshift_api_config_v1_ImageStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "internalRegistryHostname": { + SchemaProps: spec.SchemaProps{ + Description: "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_Infrastructure(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.InfrastructureSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.InfrastructureStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.InfrastructureSpec", "github.com/openshift/api/config/v1.InfrastructureStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_InfrastructureList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Infrastructure"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Infrastructure", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_InfrastructureSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_InfrastructureStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_Ingress(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Ingress holds cluster-wide information about Ingress. 
The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.IngressSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.IngressStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.IngressSpec", "github.com/openshift/api/config/v1.IngressStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_IngressList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Ingress"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Ingress", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_IngressSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_IngressStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_KubeClientConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kubeConfig": { + SchemaProps: spec.SchemaProps{ + Description: "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", + Type: []string{"string"}, + Format: "", + }, + }, + "connectionOverrides": { + SchemaProps: spec.SchemaProps{ + Description: "connectionOverrides specifies client overrides for system components to loop back to this master.", + Ref: ref("github.com/openshift/api/config/v1.ClientConnectionOverrides"), + }, + }, + }, + Required: []string{"kubeConfig", "connectionOverrides"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.ClientConnectionOverrides"}, + } +} + +func schema_openshift_api_config_v1_LeaderElection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LeaderElection provides information to elect a leader", + Properties: map[string]spec.Schema{ + "disable": { + SchemaProps: spec.SchemaProps{ + Description: "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "namespace indicates which namespace the resource is in", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name indicates what name to use for the resource", + Type: []string{"string"}, + Format: "", + }, + }, + "leaseDuration": { + SchemaProps: spec.SchemaProps{ + Description: "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. 
This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "renewDeadline": { + SchemaProps: spec.SchemaProps{ + Description: "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "retryPeriod": { + SchemaProps: spec.SchemaProps{ + Description: "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_openshift_api_config_v1_NamedCertificate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NamedCertificate specifies a certificate/key, and the names it should be served for", + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "certFile": { + SchemaProps: spec.SchemaProps{ + Description: "CertFile is a file containing a PEM-encoded certificate", + Type: []string{"string"}, + Format: "", + }, + }, + "keyFile": { + SchemaProps: spec.SchemaProps{ + Description: "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"names", "certFile", "keyFile"}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_Network(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Network holds cluster-wide information about Network. The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.NetworkSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.NetworkStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.NetworkSpec", "github.com/openshift/api/config/v1.NetworkStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_NetworkList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Network"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Network", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_NetworkSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_NetworkStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_OAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OAuth holds cluster-wide information about OAuth. 
The canonical name is `cluster`", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.OAuthSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.OAuthStatus"), + }, + }, + }, + Required: []string{"spec", "status"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.OAuthSpec", "github.com/openshift/api/config/v1.OAuthStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_config_v1_OAuthList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.OAuth"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.OAuth", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_OAuthSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, } } -func schema_openshift_api_config_v1_Image(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_OAuthStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`", + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_Project(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Project holds cluster-wide information about Project. The canonical name is `cluster`", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -7340,13 +8600,13 @@ func schema_openshift_api_config_v1_Image(ref common.ReferenceCallback) common.O "spec": { SchemaProps: spec.SchemaProps{ Description: "spec holds user settable values for configuration", - Ref: ref("github.com/openshift/api/config/v1.ImageSpec"), + Ref: ref("github.com/openshift/api/config/v1.ProjectSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "status holds observed values from the cluster. They may not be overridden.", - Ref: ref("github.com/openshift/api/config/v1.ImageStatus"), + Ref: ref("github.com/openshift/api/config/v1.ProjectStatus"), }, }, }, @@ -7354,38 +8614,11 @@ func schema_openshift_api_config_v1_Image(ref common.ReferenceCallback) common.O }, }, Dependencies: []string{ - "github.com/openshift/api/config/v1.ImageSpec", "github.com/openshift/api/config/v1.ImageStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_openshift_api_config_v1_ImageLabel(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name defines the name of the label. 
It must have non-zero length.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value defines the literal value of the label.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{}, + "github.com/openshift/api/config/v1.ProjectSpec", "github.com/openshift/api/config/v1.ProjectStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_openshift_api_config_v1_ImageList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_ProjectList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -7416,7 +8649,7 @@ func schema_openshift_api_config_v1_ImageList(ref common.ReferenceCallback) comm Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/config/v1.Image"), + Ref: ref("github.com/openshift/api/config/v1.Project"), }, }, }, @@ -7427,168 +8660,80 @@ func schema_openshift_api_config_v1_ImageList(ref common.ReferenceCallback) comm }, }, Dependencies: []string{ - "github.com/openshift/api/config/v1.Image", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/openshift/api/config/v1.Project", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_openshift_api_config_v1_ImageSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_ProjectSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "allowedRegistriesForImport": { - SchemaProps: spec.SchemaProps{ - Description: "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/config/v1.RegistryLocation"), - }, - }, - }, - }, - }, - "externalRegistryHostname": { - SchemaProps: spec.SchemaProps{ - Description: "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. 
The value must be in \"hostname[:port]\" format.", - Type: []string{"string"}, - Format: "", - }, - }, - "additionalTrustedCA": { - SchemaProps: spec.SchemaProps{ - Description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import.", - Ref: ref("github.com/openshift/api/config/v1.ConfigMapReference"), - }, - }, - }, + Properties: map[string]spec.Schema{}, }, }, - Dependencies: []string{ - "github.com/openshift/api/config/v1.ConfigMapReference", "github.com/openshift/api/config/v1.RegistryLocation"}, + Dependencies: []string{}, } } -func schema_openshift_api_config_v1_ImageStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_ProjectStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "internalRegistryHostname": { - SchemaProps: spec.SchemaProps{ - Description: "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Properties: map[string]spec.Schema{}, }, }, Dependencies: []string{}, } } -func schema_openshift_api_config_v1_KubeClientConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_RegistryLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", Properties: map[string]spec.Schema{ - "kubeConfig": { + "domainName": { SchemaProps: spec.SchemaProps{ - Description: "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. 
Empty uses an in-cluster-config", + Description: "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", Type: []string{"string"}, Format: "", }, }, - "connectionOverrides": { + "insecure": { SchemaProps: spec.SchemaProps{ - Description: "connectionOverrides specifies client overrides for system components to loop back to this master.", - Ref: ref("github.com/openshift/api/config/v1.ClientConnectionOverrides"), + Description: "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + Type: []string{"boolean"}, + Format: "", }, }, }, - Required: []string{"kubeConfig", "connectionOverrides"}, + Required: []string{"domainName"}, }, }, - Dependencies: []string{ - "github.com/openshift/api/config/v1.ClientConnectionOverrides"}, + Dependencies: []string{}, } } -func schema_openshift_api_config_v1_LeaderElection(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_RemoteConnectionInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "LeaderElection provides information to elect a leader", + Description: "RemoteConnectionInfo holds information necessary for establishing a remote connection", Properties: map[string]spec.Schema{ - "disable": { - SchemaProps: spec.SchemaProps{ - Description: "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "namespace": { + "url": { SchemaProps: spec.SchemaProps{ - Description: "namespace indicates which namespace the resource is in", + Description: "URL is the remote URL to connect to", Type: []string{"string"}, Format: "", }, }, - "name": { + "ca": { SchemaProps: spec.SchemaProps{ - Description: "name indicates what name to use for the resource", + Description: "CA is the CA for verifying TLS connections", Type: []string{"string"}, Format: "", }, }, - "leaseDuration": { - SchemaProps: spec.SchemaProps{ - Description: "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "renewDeadline": { - SchemaProps: spec.SchemaProps{ - Description: "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "retryPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. 
This is only applicable if leader election is enabled.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - } -} - -func schema_openshift_api_config_v1_NamedCertificate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NamedCertificate specifies a certificate/key, and the names it should be served for", - Properties: map[string]spec.Schema{ - "names": { - SchemaProps: spec.SchemaProps{ - Description: "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, "certFile": { SchemaProps: spec.SchemaProps{ Description: "CertFile is a file containing a PEM-encoded certificate", @@ -7604,77 +8749,122 @@ func schema_openshift_api_config_v1_NamedCertificate(ref common.ReferenceCallbac }, }, }, - Required: []string{"names", "certFile", "keyFile"}, + Required: []string{"url", "ca", "certFile", "keyFile"}, }, }, Dependencies: []string{}, } } -func schema_openshift_api_config_v1_RegistryLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_Scheduling(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + Description: "Scheduling holds cluster-wide information about Scheduling. The canonical name is `cluster`", Properties: map[string]spec.Schema{ - "domainName": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "insecure": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", - Type: []string{"boolean"}, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, Format: "", }, }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "spec holds user settable values for configuration", + Ref: ref("github.com/openshift/api/config/v1.SchedulingSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds observed values from the cluster. They may not be overridden.", + Ref: ref("github.com/openshift/api/config/v1.SchedulingStatus"), + }, + }, }, - Required: []string{"domainName"}, + Required: []string{"spec", "status"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "github.com/openshift/api/config/v1.SchedulingSpec", "github.com/openshift/api/config/v1.SchedulingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_openshift_api_config_v1_RemoteConnectionInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_SchedulingList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RemoteConnectionInfo holds information necessary for establishing a remote connection", Properties: map[string]spec.Schema{ - "url": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "URL is the remote URL to connect to", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "ca": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "CA is the CA for verifying TLS connections", + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "certFile": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "CertFile is a file containing a PEM-encoded certificate", - Type: []string{"string"}, - Format: "", + Description: "Standard object's metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, - "keyFile": { + "items": { SchemaProps: spec.SchemaProps{ - Description: "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", - Type: []string{"string"}, - Format: "", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/config/v1.Scheduling"), + }, + }, + }, }, }, }, - Required: []string{"url", "ca", "certFile", "keyFile"}, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/config/v1.Scheduling", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_config_v1_SchedulingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, + }, + }, + Dependencies: []string{}, + } +} + +func schema_openshift_api_config_v1_SchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{}, }, }, Dependencies: []string{}, diff --git a/pkg/test/ginkgo/cmd_runsuite.go b/pkg/test/ginkgo/cmd_runsuite.go new file mode 100644 index 000000000000..f4808f6e7e6a --- /dev/null +++ b/pkg/test/ginkgo/cmd_runsuite.go @@ -0,0 +1,249 @@ +package ginkgo + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/signal" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/config" +) + +// Options is used to run a suite of tests by invoking each test +// as a call to a child worker (the run-tests command). 
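For orientation, the struct that follows is the programmatic entry point behind the `run` subcommand. Below is a minimal sketch of how a caller might wire it up, assuming an invented suite name, an assumed artifacts path, and a binary that links in the Ginkgo specs it intends to run; only the field and method names are taken from this package:

```go
package main

import (
	"log"
	"os"
	"strings"

	"github.com/openshift/origin/pkg/test/ginkgo"
)

func main() {
	// An invented suite definition; Matches selects tests by substring.
	suites := []*ginkgo.TestSuite{{
		Name:        "example/parallel",
		Description: "An illustrative suite.",
		Matches:     func(name string) bool { return strings.Contains(name, "[example]") },
		Parallelism: 5,
	}}

	opt := &ginkgo.Options{
		Suites:   suites,
		JUnitDir: "/tmp/artifacts/junit", // assumed output location
		Out:      os.Stdout,
		ErrOut:   os.Stderr,
	}
	// Run resolves the named suite, filters the registered specs, and executes
	// each selected test as a child "run-test" process.
	if err := opt.Run([]string{"example/parallel"}); err != nil {
		log.Fatal(err)
	}
}
```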
+type Options struct { + Parallelism int + Timeout time.Duration + JUnitDir string + TestFile string + OutFile string + DetectFlakes int + + Provider string + + Suites []*TestSuite + + DryRun bool + Out, ErrOut io.Writer +} + +func (opt *Options) Run(args []string) error { + var suite *TestSuite + + if len(opt.TestFile) > 0 { + var in []byte + var err error + if opt.TestFile == "-" { + in, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + } else { + in, err = ioutil.ReadFile(opt.TestFile) + } + if err != nil { + return err + } + suite, err = newSuiteFromFile("files", in) + if err != nil { + return fmt.Errorf("could not read test suite from input: %v", err) + } + } + + if suite == nil && len(args) == 0 { + fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n")) + return fmt.Errorf("specify a test suite to run, for example: %s run %s", filepath.Base(os.Args[0]), opt.Suites[0].Name) + } + if suite == nil && len(args) > 0 { + for _, s := range opt.Suites { + if s.Name == args[0] { + suite = s + break + } + } + } + if suite == nil { + fmt.Fprintf(opt.ErrOut, SuitesString(opt.Suites, "Select a test suite to run against the server:\n\n")) + return fmt.Errorf("suite %q does not exist", args[0]) + } + + tests, err := testsForSuite(config.GinkgoConfig) + if err != nil { + return err + } + + // This ensures that tests in the identified paths do not run in parallel, because + // the test suite reuses shared resources without considering whether another test + // could be running at the same time. While these are technically [Serial], ginkgo + // parallel mode provides this guarantee. Doing this for all suites would be too + // slow. + setTestExclusion(tests, func(suitePath string, t *testCase) bool { + for _, name := range []string{ + "/k8s.io/kubernetes/test/e2e/apps/disruption.go", + } { + if strings.HasSuffix(suitePath, name) { + return true + } + } + return false + }) + + tests = suite.Filter(tests) + if len(tests) == 0 { + return fmt.Errorf("suite %q does not contain any tests", suite.Name) + } + + if opt.DryRun { + // if true { + // bySuite := make(map[string][]*testCase) + // for _, test := range tests { + // bySuite[test.testExclusion] = append(bySuite[test.testExclusion], test) + // } + // var names []string + // for k := range bySuite { + // names = append(names, k) + // } + // sort.Slice(names, func(i, j int) bool { + // return len(bySuite[names[i]]) > len(bySuite[names[j]]) + // }) + // for _, name := range names { + // if len(name) == 0 { + // fmt.Fprintf(out, ":\n") + // } else { + // fmt.Fprintf(out, "%s:\n", name) + // } + // for _, test := range sortedTests(bySuite[name]) { + // fmt.Fprintf(out, " %q\n", test.name) + // } + // } + // return nil + // } + for _, test := range sortedTests(tests) { + fmt.Fprintf(opt.Out, "%q\n", test.name) + } + return nil + } + + if len(opt.JUnitDir) > 0 { + if _, err := os.Stat(opt.JUnitDir); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("could not access --junit-dir: %v", err) + } + if err := os.MkdirAll(opt.JUnitDir, 0755); err != nil { + return fmt.Errorf("could not create --junit-dir: %v", err) + } + } + } + + parallelism := opt.Parallelism + if parallelism == 0 { + parallelism = suite.Parallelism + } + if parallelism == 0 { + parallelism = 10 + } + timeout := opt.Timeout + if timeout == 0 { + timeout = suite.TestTimeout + } + if timeout == 0 { + timeout = 10 * time.Minute + } + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + abortCh := 
make(chan os.Signal) + go func() { + <-abortCh + fmt.Fprintf(opt.ErrOut, "Interrupted, terminating tests\n") + cancelFn() + sig := <-abortCh + fmt.Fprintf(opt.ErrOut, "Interrupted twice, exiting (%s)\n", sig) + switch sig { + case syscall.SIGINT: + os.Exit(130) + default: + os.Exit(0) + } + }() + signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM) + + status := newTestStatus(opt.Out, len(tests), timeout) + + smoke, normal := splitTests(tests, func(t *testCase) bool { + return strings.Contains(t.name, "[Smoke]") + }) + + // run the tests + start := time.Now() + + // run our smoke tests first + q := newParallelTestQueue(smoke) + q.Execute(ctx, parallelism, status.Run) + + // run other tests next + q = newParallelTestQueue(normal) + q.Execute(ctx, parallelism, status.Run) + + duration := time.Now().Sub(start).Round(time.Second / 10) + if duration > time.Minute { + duration = duration.Round(time.Second) + } + + pass, fail, skip, failing := summarizeTests(tests) + + if opt.DetectFlakes > 0 && fail > 0 { + var retries []*testCase + for _, test := range failing { + retries = append(retries, test.Retry()) + if len(retries) > opt.DetectFlakes { + break + } + } + + q := newParallelTestQueue(retries) + buf := &bytes.Buffer{} + status := newTestStatus(buf, len(retries), timeout) + q.Execute(ctx, parallelism, status.Run) + var flaky []string + var repeatFailures []*testCase + for _, test := range retries { + if test.success { + flaky = append(flaky, test.name) + } else { + repeatFailures = append(repeatFailures, test) + } + } + if len(flaky) > 0 { + failing = repeatFailures + sort.Strings(flaky) + fmt.Fprintf(opt.ErrOut, "Flaky tests:\n\n%s\n\n", strings.Join(flaky, "\n")) + } + } + + if fail > 0 { + names := testNames(failing) + sort.Strings(names) + fmt.Fprintf(opt.ErrOut, "Failing tests:\n\n%s\n\n", strings.Join(names, "\n")) + } + + if len(opt.JUnitDir) > 0 { + if err := writeJUnitReport("openshift-tests", tests, opt.JUnitDir, duration, opt.ErrOut); err != nil { + fmt.Fprintf(opt.ErrOut, "error: Unable to write JUnit results: %v", err) + } + } + + if fail > 0 { + return fmt.Errorf("%d fail, %d pass, %d skip (%s)", fail, pass, skip, duration) + } + fmt.Fprintf(opt.ErrOut, "%d pass, %d skip (%s)\n", pass, skip, duration) + return nil +} diff --git a/pkg/test/ginkgo/cmd_runtest.go b/pkg/test/ginkgo/cmd_runtest.go new file mode 100644 index 000000000000..e6bd23821105 --- /dev/null +++ b/pkg/test/ginkgo/cmd_runtest.go @@ -0,0 +1,98 @@ +package ginkgo + +import ( + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +// TestOptions handles running a single test. 
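The type below backs the `run-test` subcommand, which runs exactly one spec in the current process and reports its outcome through the exit code (0 pass, 1 fail or panic, 3 skip, per the switch further down). Here is a hedged sketch of how an external harness could consume that contract; the binary name and test name are placeholders:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Placeholder name; real names come from a dry-run listing of a suite.
	outcome, err := runOne("openshift-tests", "[example] a placeholder test name")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(outcome)
}

func runOne(binary, testName string) (string, error) {
	out, err := exec.Command(binary, "run-test", testName).CombinedOutput()
	if err == nil {
		return "pass", nil
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		// Mirrors the exit codes used by run-test (ExitCode needs Go 1.12+).
		switch exitErr.ExitCode() {
		case 1:
			return "fail", nil // failure or panic
		case 3:
			return "skip", nil // the reason is printed by the child process
		}
	}
	return "", fmt.Errorf("could not invoke %q: %v\n%s", testName, err, out)
}
```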
+type TestOptions struct { + DryRun bool + Out, ErrOut io.Writer +} + +func (opt *TestOptions) Run(args []string) error { + if len(args) != 1 { + return fmt.Errorf("only a single test name may be passed") + } + + tests, err := testsForSuite(config.GinkgoConfig) + if err != nil { + return err + } + var test *testCase + for _, t := range tests { + if t.name == args[0] { + test = t + break + } + } + if test == nil { + return fmt.Errorf("no test exists with that name") + } + + if opt.DryRun { + fmt.Printf("Running test (dry-run)\n") + return nil + } + + config.GinkgoConfig.FocusString = fmt.Sprintf("^%s$", regexp.QuoteMeta(" [Top Level] "+test.name)) + config.DefaultReporterConfig.NoColor = true + w := ginkgo.GinkgoWriterType() + w.SetStream(true) + reporter := NewMinimalReporter(test.name, test.location) + ginkgo.GlobalSuite().Run(reporter, "", []reporters.Reporter{reporter}, w, config.GinkgoConfig) + summary, setup := reporter.Summary() + if summary == nil && setup != nil { + summary = &types.SpecSummary{ + Failure: setup.Failure, + State: setup.State, + } + } + + // TODO: print stack line? + switch { + case summary == nil: + return fmt.Errorf("test suite set up failed, see logs") + case summary.Passed(): + case summary.Skipped(): + if len(summary.Failure.Message) > 0 { + fmt.Fprintf(os.Stderr, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + } + if len(summary.Failure.ForwardedPanic) > 0 { + fmt.Fprintf(os.Stderr, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + } + os.Exit(3) + case summary.Failed(), summary.Panicked(): + if len(summary.Failure.ForwardedPanic) > 0 { + if len(summary.Failure.Location.FullStackTrace) > 0 { + fmt.Fprintf(os.Stderr, "\n%s\n", summary.Failure.Location.FullStackTrace) + } + fmt.Fprintf(os.Stderr, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + os.Exit(1) + } + fmt.Fprintf(os.Stderr, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + os.Exit(1) + default: + return fmt.Errorf("unrecognized test case outcome: %#v", summary) + } + return nil +} + +func lastFilenameSegment(filename string) string { + if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { + return parts[len(parts)-1] + } + if parts := strings.Split(filename, "/src/"); len(parts) > 1 { + return parts[len(parts)-1] + } + return filename +} diff --git a/pkg/test/ginkgo/ginkgo.go b/pkg/test/ginkgo/ginkgo.go new file mode 100644 index 000000000000..9c349e1c267e --- /dev/null +++ b/pkg/test/ginkgo/ginkgo.go @@ -0,0 +1,91 @@ +package ginkgo + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +func testsForSuite(cfg config.GinkgoConfigType) ([]*testCase, error) { + iter := ginkgo.GlobalSuite().Iterator(cfg) + var tests []*testCase + for { + spec, err := iter.Next() + if err != nil { + if err.Error() == "no more specs to run" { + break + } + return nil, err + } + tests = append(tests, newTestCase(spec)) + } + return tests, nil +} + +type ginkgoSpec interface { + Run(io.Writer) + ConcatenatedString() string + Skip() + Skipped() bool + Failed() bool + Passed() bool + Summary(suiteID string) *types.SpecSummary +} + +type 
MinimalReporter struct { + name string + location types.CodeLocation + spec *types.SpecSummary + setup *types.SetupSummary +} + +func NewMinimalReporter(name string, location types.CodeLocation) *MinimalReporter { + return &MinimalReporter{ + name: name, + location: location, + } +} + +func (r *MinimalReporter) Fail() { +} + +func (r *MinimalReporter) Summary() (*types.SpecSummary, *types.SetupSummary) { + return r.spec, r.setup +} + +func (r *MinimalReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { +} + +func (r *MinimalReporter) BeforeSuiteDidRun(setup *types.SetupSummary) { + r.setup = setup +} + +func (r *MinimalReporter) SpecWillRun(spec *types.SpecSummary) { +} + +func (r *MinimalReporter) SpecDidComplete(spec *types.SpecSummary) { + if spec.ComponentCodeLocations[len(spec.ComponentCodeLocations)-1] != r.location { + return + } + if specName(spec) != r.name { + return + } + if r.spec != nil { + panic(fmt.Sprintf("spec was set twice: %q and %q", specName(r.spec), specName(spec))) + } + r.spec = spec +} + +func (r *MinimalReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { +} + +func (r *MinimalReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { +} + +func specName(spec *types.SpecSummary) string { + return strings.Join(spec.ComponentTexts[1:], " ") +} diff --git a/pkg/test/ginkgo/junit.go b/pkg/test/ginkgo/junit.go new file mode 100644 index 000000000000..7c6604133af7 --- /dev/null +++ b/pkg/test/ginkgo/junit.go @@ -0,0 +1,195 @@ +package ginkgo + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strings" + "time" +) + +// The below types are directly marshalled into XML. The types correspond to jUnit +// XML schema, but do not contain all valid fields. For instance, the class name +// field for test cases is omitted, as this concept does not directly apply to Go. +// For XML specifications see http://help.catchsoftware.com/display/ET/JUnit+Format +// or view the XSD included in this package as 'junit.xsd' + +// TestSuites represents a flat collection of jUnit test suites. +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + + // Suites are the jUnit test suites held in this collection + Suites []*JUnitTestSuite `xml:"testsuite"` +} + +// TestSuite represents a single jUnit test suite, potentially holding child suites. 
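To make the XML mapping concrete, here is a small sketch that fills in the structs defined in this file and marshals them; the suite and test names are invented, and `writeJUnitReport` further down performs the equivalent work for real results:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/openshift/origin/pkg/test/ginkgo"
)

func main() {
	s := &ginkgo.JUnitTestSuite{
		Name:      "openshift-tests",
		NumTests:  2,
		NumFailed: 1,
		Duration:  12.3,
		TestCases: []*ginkgo.JUnitTestCase{
			{Name: "an invented passing test", Duration: 4.1},
			{
				Name:     "an invented failing test",
				Duration: 8.2,
				FailureOutput: &ginkgo.FailureOutput{
					Output: "fail [example.go:10]: assertion failed",
				},
			},
		},
	}
	out, err := xml.MarshalIndent(s, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints one <testsuite> element with two <testcase> children; the failing
	// case carries a nested <failure> element with the captured output.
	fmt.Println(string(out))
}
```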
+type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + + // Name is the name of the test suite + Name string `xml:"name,attr"` + + // NumTests records the number of tests in the TestSuite + NumTests uint `xml:"tests,attr"` + + // NumSkipped records the number of skipped tests in the suite + NumSkipped uint `xml:"skipped,attr"` + + // NumFailed records the number of failed tests in the suite + NumFailed uint `xml:"failures,attr"` + + // Duration is the time taken in seconds to run all tests in the suite + Duration float64 `xml:"time,attr"` + + // Properties holds other properties of the test suite as a mapping of name to value + Properties []*TestSuiteProperty `xml:"properties,omitempty"` + + // TestCases are the test cases contained in the test suite + TestCases []*JUnitTestCase `xml:"testcase"` + + // Children holds nested test suites + Children []*JUnitTestSuite `xml:"testsuite"` +} + +// TestSuiteProperty contains a mapping of a property name to a value +type TestSuiteProperty struct { + XMLName xml.Name `xml:"property"` + + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +// JUnitTestCase represents a jUnit test case +type JUnitTestCase struct { + XMLName xml.Name `xml:"testcase"` + + // Name is the name of the test case + Name string `xml:"name,attr"` + + // Classname is an attribute set by the package type and is required + Classname string `xml:"classname,attr,omitempty"` + + // Duration is the time taken in seconds to run the test + Duration float64 `xml:"time,attr"` + + // SkipMessage holds the reason why the test was skipped + SkipMessage *SkipMessage `xml:"skipped"` + + // FailureOutput holds the output from a failing test + FailureOutput *FailureOutput `xml:"failure"` + + // SystemOut is output written to stdout during the execution of this test case + SystemOut string `xml:"system-out,omitempty"` + + // SystemErr is output written to stderr during the execution of this test case + SystemErr string `xml:"system-err,omitempty"` +} + +// SkipMessage holds a message explaining why a test was skipped +type SkipMessage struct { + XMLName xml.Name `xml:"skipped"` + + // Message explains why the test was skipped + Message string `xml:"message,attr,omitempty"` +} + +// FailureOutput holds the output from a failing test +type FailureOutput struct { + XMLName xml.Name `xml:"failure"` + + // Message holds the failure message from the test + Message string `xml:"message,attr"` + + // Output holds verbose failure output from the test + Output string `xml:",chardata"` +} + +// TestResult is the result of a test case +type TestResult string + +const ( + TestResultPass TestResult = "pass" + TestResultSkip TestResult = "skip" + TestResultFail TestResult = "fail" +) + +func writeJUnitReport(name string, tests []*testCase, dir string, duration time.Duration, errOut io.Writer) error { + s := &JUnitTestSuite{ + Name: name, + Duration: duration.Seconds(), + } + for _, test := range tests { + switch { + case test.skipped: + s.NumTests++ + s.TestCases = append(s.TestCases, &JUnitTestCase{ + Name: test.name, + SystemOut: string(test.out), + Duration: test.duration.Seconds(), + SkipMessage: &SkipMessage{ + Message: lastLinesUntil(string(test.out), 40, "skip ["), + }, + }) + case test.failed: + s.NumTests++ + s.NumSkipped++ + s.TestCases = append(s.TestCases, &JUnitTestCase{ + Name: test.name, + SystemOut: string(test.out), + Duration: test.duration.Seconds(), + FailureOutput: &FailureOutput{ + Output: lastLinesUntil(string(test.out), 40, "fail ["), + }, + }) + case 
test.success: + s.NumTests++ + s.NumFailed++ + s.TestCases = append(s.TestCases, &JUnitTestCase{ + Name: test.name, + Duration: test.duration.Seconds(), + }) + } + } + out, err := xml.Marshal(s) + if err != nil { + return err + } + path := filepath.Join(dir, fmt.Sprintf("junit_%s.xml", time.Now().UTC().Format("2006-01-02T150405"))) + fmt.Fprintf(errOut, "Writing JUnit report to %s\n\n", path) + return ioutil.WriteFile(path, out, 0640) +} + +func lastLinesUntil(output string, max int, until ...string) string { + output = strings.TrimSpace(output) + index := len(output) - 1 + if index < 0 || max == 0 { + return output + } + for max > 0 { + next := strings.LastIndex(output[:index], "\n") + if next <= 0 { + return strings.TrimSpace(output) + } + // empty lines don't count + line := strings.TrimSpace(output[next+1 : index]) + if len(line) > 0 { + max-- + } + index = next + if stringStartsWithAny(line, until) { + break + } + } + return strings.TrimSpace(output[index:]) +} + +func stringStartsWithAny(s string, contains []string) bool { + for _, match := range contains { + if strings.HasPrefix(s, match) { + return true + } + } + return false +} diff --git a/pkg/test/ginkgo/junit_test.go b/pkg/test/ginkgo/junit_test.go new file mode 100644 index 000000000000..2db9c20d430b --- /dev/null +++ b/pkg/test/ginkgo/junit_test.go @@ -0,0 +1,35 @@ +package ginkgo + +import "testing" + +func Test_lastLines(t *testing.T) { + tests := []struct { + name string + output string + max int + matches []string + want string + }{ + {output: "", max: 0, want: ""}, + {output: "", max: 1, want: ""}, + {output: "test", max: 1, want: "test"}, + {output: "test\n", max: 1, want: "test"}, + {output: "test\nother", max: 1, want: "other"}, + {output: "test\nother\n", max: 1, want: "other"}, + {output: "test\nother\n", max: 2, want: "test\nother"}, + {output: "test\nother\n", max: 3, want: "test\nother"}, + {output: "test\n\n\nother\n", max: 2, want: "test\n\n\nother"}, + + {output: "test\n\n\nother and stuff\n", max: 2, matches: []string{"other"}, want: "other and stuff"}, + {output: "test\n\n\nother\n", max: 2, matches: []string{"test"}, want: "test\n\n\nother"}, + {output: "test\n\n\nother\n", max: 1, matches: []string{"test"}, want: "other"}, + {output: "test\ntest\n\n\nother\n", max: 10, matches: []string{"test"}, want: "test\n\n\nother"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := lastLinesUntil(tt.output, tt.max, tt.matches...); got != tt.want { + t.Errorf("lastLines() = %q, want %q", got, tt.want) + } + }) + } +} diff --git a/pkg/test/ginkgo/queue.go b/pkg/test/ginkgo/queue.go new file mode 100644 index 000000000000..dd508845188d --- /dev/null +++ b/pkg/test/ginkgo/queue.go @@ -0,0 +1,161 @@ +package ginkgo + +import ( + "container/ring" + "context" + "strings" + "sync" +) + +// parallelByFileTestQueue runs tests in parallel unless they have +// the `[Serial]` tag on their name or if another test with the +// testExclusion field is currently running. Serial tests are +// defered until all other tests are completed. 
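The scheduling policy described in the comment above can be illustrated with a much smaller, self-contained sketch: untagged tests fan out to a bounded worker pool, while anything named `[Serial]` is set aside and run one at a time afterwards. This is an illustration of the policy only; the real queue below additionally blocks tests that share a `testExclusion` path and uses a ring buffer with a condition variable rather than a channel:

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	tests := []string{
		"an invented parallel test",
		"another invented parallel test",
		"an invented disruptive test [Serial]",
	}

	var mu sync.Mutex
	var serial []string

	// Phase 1: run everything not tagged [Serial] on a small worker pool.
	work := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range work {
				if strings.Contains(name, "[Serial]") {
					mu.Lock()
					serial = append(serial, name)
					mu.Unlock()
					continue
				}
				fmt.Println("parallel:", name)
			}
		}()
	}
	for _, name := range tests {
		work <- name
	}
	close(work)
	wg.Wait()

	// Phase 2: run the deferred [Serial] tests sequentially.
	for _, name := range serial {
		fmt.Println("serial:", name)
	}
}
```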
+type parallelByFileTestQueue struct { + cond *sync.Cond + lock sync.Mutex + queue *ring.Ring + active map[string]struct{} +} + +type nopLock struct{} + +func (nopLock) Lock() {} +func (nopLock) Unlock() {} + +type TestFunc func(ctx context.Context, test *testCase) + +func newParallelTestQueue(tests []*testCase) *parallelByFileTestQueue { + r := ring.New(len(tests)) + for _, test := range tests { + r.Value = test + r = r.Next() + } + q := ¶llelByFileTestQueue{ + cond: sync.NewCond(nopLock{}), + queue: r, + active: make(map[string]struct{}), + } + return q +} + +func (q *parallelByFileTestQueue) pop() (*testCase, bool) { + q.lock.Lock() + defer q.lock.Unlock() + r := q.queue + l := r.Len() + if l == 0 { + q.cond.Broadcast() + return nil, true + } + for i := 0; i < l; i++ { + t := r.Value.(*testCase) + if _, ok := q.active[t.testExclusion]; ok { + r = r.Next() + continue + } + if len(t.testExclusion) > 0 { + q.active[t.testExclusion] = struct{}{} + } + if l == 1 { + q.queue = nil + } else { + q.queue = r.Prev() + q.queue.Unlink(1) + } + return t, true + } + return nil, false +} + +func (q *parallelByFileTestQueue) done(t *testCase) { + q.lock.Lock() + defer q.lock.Unlock() + delete(q.active, t.testExclusion) + q.cond.Broadcast() +} + +func (q *parallelByFileTestQueue) Close() { + q.lock.Lock() + defer q.lock.Unlock() + q.queue = nil + q.active = make(map[string]struct{}) + q.cond.Broadcast() +} + +func (q *parallelByFileTestQueue) Take(ctx context.Context, fn TestFunc) bool { + for { + test, ok := q.pop() + if !ok { + q.cond.Wait() + continue + } + if test == nil { + return false + } + defer q.done(test) + fn(ctx, test) + return true + } +} + +func (q *parallelByFileTestQueue) Execute(parentCtx context.Context, parallelism int, fn TestFunc) { + go func() { + <-parentCtx.Done() + q.Close() + }() + var serial []*testCase + var wg sync.WaitGroup + wg.Add(parallelism) + for i := 0; i < parallelism; i++ { + go func(i int) { + for q.Take(parentCtx, func(ctx context.Context, test *testCase) { + if strings.Contains(test.name, "[Serial]") { + serial = append(serial, test) + return + } + fn(ctx, test) + }) { + // no-op + } + wg.Done() + }(i) + } + wg.Wait() + for _, test := range serial { + select { + case <-parentCtx.Done(): + return + default: + } + fn(parentCtx, test) + } +} + +func setTestExclusion(tests []*testCase, fn func(suitePath string, t *testCase) bool) { + for _, test := range tests { + summary := test.spec.Summary("") + var suitePath string + for _, loc := range summary.ComponentCodeLocations { + if len(loc.FileName) > 0 { + if !strings.HasSuffix(loc.FileName, "/k8s.io/kubernetes/test/e2e/framework/framework.go") { + suitePath = loc.FileName + } + } + } + if fn(suitePath, test) { + test.testExclusion = suitePath + } + } +} + +func splitTests(tests []*testCase, fn func(*testCase) bool) (a, b []*testCase) { + for _, t := range tests { + if fn(t) { + a = append(a, t) + } else { + b = append(b, t) + } + } + return a, b +} diff --git a/pkg/test/ginkgo/status.go b/pkg/test/ginkgo/status.go new file mode 100644 index 000000000000..897231e3033d --- /dev/null +++ b/pkg/test/ginkgo/status.go @@ -0,0 +1,118 @@ +package ginkgo + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "sort" + "sync" + "syscall" + "time" +) + +type testStatus struct { + out io.Writer + timeout time.Duration + + lock sync.Mutex + failures int + index int + total int +} + +func newTestStatus(out io.Writer, total int, timeout time.Duration) *testStatus { + return &testStatus{ + out: out, + total: total, + timeout: 
timeout, + } +} + +func (s *testStatus) Failure() { + s.lock.Lock() + defer s.lock.Unlock() + s.failures++ +} + +func (s *testStatus) Fprintf(format string) { + s.lock.Lock() + defer s.lock.Unlock() + if s.index < s.total { + s.index++ + } + fmt.Fprintf(s.out, format, s.failures, s.index, s.total) +} + +func (s *testStatus) Run(ctx context.Context, test *testCase) { + defer func() { + switch { + case test.success: + fmt.Fprint(s.out, string(test.out)+fmt.Sprintf("\npassed: (%s) %q\n\n", test.duration, test.name)) + case test.skipped: + fmt.Fprint(s.out, string(test.out)+fmt.Sprintf("\nskipped: (%s) %q\n\n", test.duration, test.name)) + case test.failed: + fmt.Fprint(s.out, string(test.out)+fmt.Sprintf("\nfailed: (%s) %q\n\n", test.duration, test.name)) + s.Failure() + } + }() + + test.start = time.Now() + c := exec.Command(os.Args[0], "run-test", test.name) + s.Fprintf(fmt.Sprintf("started: (%s) %q\n\n", "%d/%d/%d", test.name)) + out, err := runWithTimeout(ctx, c, s.timeout) + test.end = time.Now() + + duration := test.end.Sub(test.start).Round(time.Second / 10) + if duration > time.Minute { + duration = duration.Round(time.Second) + } + test.duration = duration + test.out = out + if err == nil { + test.success = true + return + } + if exitErr, ok := err.(*exec.ExitError); ok { + switch exitErr.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() { + case 1: + // failed + test.failed = true + case 2: + // timeout (ABRT is an exit code 2) + test.failed = true + case 3: + // skipped + test.skipped = true + default: + test.failed = true + } + return + } + test.failed = true +} + +func summarizeTests(tests []*testCase) (int, int, int, []*testCase) { + var pass, fail, skip int + var failingTests []*testCase + for _, t := range tests { + switch { + case t.success: + pass++ + case t.failed: + fail++ + failingTests = append(failingTests, t) + case t.skipped: + skip++ + } + } + return pass, fail, skip, failingTests +} + +func sortedTests(tests []*testCase) []*testCase { + copied := make([]*testCase, len(tests)) + copy(copied, tests) + sort.Slice(copied, func(i, j int) bool { return copied[i].name < copied[j].name }) + return copied +} diff --git a/pkg/test/ginkgo/test.go b/pkg/test/ginkgo/test.go new file mode 100644 index 000000000000..2348f065acbf --- /dev/null +++ b/pkg/test/ginkgo/test.go @@ -0,0 +1,148 @@ +package ginkgo + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "strconv" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/types" +) + +type testCase struct { + name string + spec ginkgoSpec + location types.CodeLocation + + // identifies which tests can be run in parallel (ginkgo runs suites linearly) + testExclusion string + + start time.Time + end time.Time + duration time.Duration + out []byte + success bool + failed bool + skipped bool + + previous *testCase +} + +func newTestCase(spec ginkgoSpec) *testCase { + name := spec.ConcatenatedString() + name = strings.TrimPrefix(name, "[Top Level] ") + summary := spec.Summary("") + return &testCase{ + name: name, + spec: spec, + location: summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1], + } +} + +func (t *testCase) Retry() *testCase { + copied := &testCase{ + name: t.name, + spec: t.spec, + location: t.location, + testExclusion: t.testExclusion, + + previous: t, + } + return copied +} + +type TestSuite struct { + Name string + Description string + + Matches func(name string) bool + + Parallelism int + + TestTimeout time.Duration +} + +func (s *TestSuite) Filter(tests []*testCase) []*testCase { + matches 
:= make([]*testCase, 0, len(tests)) + for _, test := range tests { + if !s.Matches(test.name) { + continue + } + matches = append(matches, test) + } + return matches +} + +func newSuiteFromFile(name string, contents []byte) (*TestSuite, error) { + suite := &TestSuite{ + Name: name, + } + tests := make(map[string]int) + for _, line := range strings.Split(string(contents), "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "\"") { + var err error + line, err = strconv.Unquote(line) + if err != nil { + return nil, err + } + tests[line]++ + } + } + suite.Matches = func(name string) bool { + _, ok := tests[name] + return ok + } + return suite, nil +} + +func testNames(tests []*testCase) []string { + var names []string + for _, t := range tests { + names = append(names, t.name) + } + return names +} + +// SuitesString returns a string with the provided suites formatted. Prefix is +// printed at the beginning of the output. +func SuitesString(suites []*TestSuite, prefix string) string { + buf := &bytes.Buffer{} + fmt.Fprintf(buf, prefix) + for _, suite := range suites { + fmt.Fprintf(buf, "%s\n %s\n\n", suite.Name, suite.Description) + } + return buf.String() +} + +func runWithTimeout(ctx context.Context, c *exec.Cmd, timeout time.Duration) ([]byte, error) { + if timeout > 0 { + go func() { + select { + // interrupt tests after timeout, and abort if they don't complete quickly enough + case <-time.After(timeout): + if c.Process != nil { + c.Process.Signal(syscall.SIGINT) + } + // if the process appears to be hung a significant amount of time after the timeout + // send an ABRT so we get a stack dump + select { + case <-time.After(time.Minute): + if c.Process != nil { + c.Process.Signal(syscall.SIGABRT) + } + } + case <-ctx.Done(): + if c.Process != nil { + c.Process.Signal(syscall.SIGINT) + } + } + + }() + } + return c.CombinedOutput() +} diff --git a/test/extended/README.md b/test/extended/README.md index 8716320a3f90..11a1b927d7ed 100644 --- a/test/extended/README.md +++ b/test/extended/README.md @@ -3,68 +3,38 @@ This document describes how a developer can write a new extended test for OpenShift and the structure of extended tests. -Running tests ------------- - -From the top-level origin directory, run - - $ test/extended/<bucket>.sh - -Where \<bucket\>.sh is one of the bucket scripts such as "core.sh". -You can further narrow the set of tests being run by setting the environment -variable `FOCUS='regex'` where 'regex' is a regular expression matching the -description of the test you want to run. For example one of the s2i tests -(s2i_incremental.go) defines: +Prerequisites +------------- - var _ = g.Describe("[Feature:Builds][Slow] incremental s2i build", func() { +* Compile both `oc` and `openshift-tests` in this repository (with `make WHAT=cmd/openshift-tests`) +* Have the environment variable `KUBECONFIG` set, pointing to your cluster. -So you can write a focus regex that includes this test by setting -`FOCUS='\[Feature:Builds\]'` or `FOCUS='incremental s2i'`. -Prerequisites +Running Tests ------------- -In order to execute the extended tests, you have to install -[Ginkgo](https://github.com/onsi/ginkgo) framework which is used in extended -tests. You can do it by running following command: +To run a test by name: ```console -$ go get github.com/onsi/ginkgo/ginkgo +$ openshift-tests run-test <test name> ``` -You also need to have the `openshift` binary in the `PATH` if you want to use -the shell script helpers to execute the extended tests.
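(Illustrative note, not part of the patch: `run-test` exits with a conventional status code that the suite runner in `pkg/test/ginkgo` above interprets as 1 = failed, 2 = timeout/abort, 3 = skipped. A minimal, hypothetical Go caller honoring that contract might look like the sketch below; the binary name on PATH and the placeholder test name are assumptions.)

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// "<test name>" is a placeholder; use any name printed by `openshift-tests run <suite> --dry-run`.
	cmd := exec.Command("openshift-tests", "run-test", "<test name>")
	out, err := cmd.CombinedOutput()
	os.Stdout.Write(out)
	if err == nil {
		fmt.Println("result: passed")
		return
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		// Mirror the exit-code convention used by the suite runner above.
		switch exitErr.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() {
		case 3:
			fmt.Println("result: skipped")
		case 2:
			fmt.Println("result: timed out or aborted")
		default:
			fmt.Println("result: failed")
		}
		return
	}
	fmt.Println("result: could not invoke test binary:", err)
}
```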
- -Rapid local testing -------------------- - -If you already have a running OpenShift cluster, you can skip having the -extended test infrastructure spin up an OpenShift cluster each time the -tests are run by setting the `TEST_ONLY` environment variable as follows: +To see the list of suites available, run: ```console -$ oc login -u system:admin -$ export KUBECONFIG=${KUBECONFIG-$HOME/.kube/config} +$ openshift-tests help run ``` -Then, for example: -```console -$ make build-extended-test -$ FOCUS='\[Feature:Builds\]' TEST_ONLY=1 test/extended/core.sh -``` +See the description of the test for more info about what prerequisites may exist for the test. -By default the Kubernetes test framework will remove the project associated with -your test spec when it completes, regardless of whether it fails or not. -Origin's wrapper scripts may also do clean-up. Running tests in parallel can -also hinder debugging. To stop these behaviours, set the `SKIP_TEARDOWN` -environment variable, set `DELETE_NAMESPACE=false`, and set `PARALLEL_NODES=1`: +To run a subset of tests using a regexp, run: ```console -$ make build-extended-test -$ FOCUS='\[Feature:Builds\]' TEST_ONLY=1 SKIP_TEARDOWN=1 DELETE_NAMESPACE=false PARALLEL_NODES=1 test/extended/core.sh +$ openshift-tests run openshift/all --dry-run | grep -E "<regexp>" | openshift-tests run -f - ``` + Test labels ----------- @@ -107,12 +77,6 @@ The structure of this directory is following: access to the Kubernetes [E2E framework](https://github.com/openshift/origin/tree/master/vendor/k8s.io/kubernetes/test/e2e) helpers. It also contains OpenShift helpers that are shared across multiple test cases, to make the test cases more DRY. * [**`test/extended/fixtures`**](fixtures) contains the JSON and YAML fixtures that are meant to be used by the extended tests. * [**`test/extended/[images,builds,...]`**](builds) each of these Go packages contains extended tests that are related to each other. For example, the `images` directory should contain test cases that are exercising usage of various Docker images in OpenShift. -* [**`hack/test-extended/[group]/run.sh`**](../../hack/test-extended) is the shell script that sets up any needed dependencies and then launches the extended tests whose top level ginkgo spec's Describe call reference the [group](#groups-vs-packages) -* [**`test/extended/extended_test.go`**](extended_test.go) is a runner for all extended test packages. Look inside this file to see how you can add new extended test Go package to be compiled: -```go - _ "github.com/openshift/origin/test/extended/builds" - _ "github.com/openshift/origin/test/extended/images" -``` Groups vs. packages ------------------- @@ -199,7 +163,7 @@ var _ = g.Describe("[default] STI build", func() { ) g.Describe("Building from a template", func() { - g.It(fmt.Sprintf("should create a image from %q template", stiBuildFixture), func() { + g.It(fmt.Sprintf("should create a image from %q template", filepath.Base(stiBuildFixture)), func() { ... } } diff --git a/test/extended/all.sh b/test/extended/all.sh deleted file mode 100755 index a3dfbf270925..000000000000 --- a/test/extended/all.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# -# This script will run all test scripts that are in test/extended.
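(Illustrative note, not part of the patch: the `--dry-run | grep | run -f -` pipeline in the README hunk above works because the suite runner accepts a list of quoted test names, one per line, and turns it into a matcher, as `newSuiteFromFile` earlier in this diff does. A rough, self-contained sketch of that parsing, using invented test names, follows.)

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildMatcher mimics the quoted-name parsing in newSuiteFromFile: every line
// that begins with a double quote is unquoted and recorded as a test name.
func buildMatcher(contents string) (func(string) bool, error) {
	tests := make(map[string]struct{})
	for _, line := range strings.Split(contents, "\n") {
		line = strings.TrimSpace(line)
		if !strings.HasPrefix(line, "\"") {
			continue
		}
		name, err := strconv.Unquote(line)
		if err != nil {
			return nil, err
		}
		tests[name] = struct{}{}
	}
	return func(name string) bool { _, ok := tests[name]; return ok }, nil
}

func main() {
	// The test names below are invented purely for illustration.
	input := "\"[Feature:Builds] example test\"\n\"[k8s.io] another example\"\n"
	matches, err := buildMatcher(input)
	if err != nil {
		panic(err)
	}
	fmt.Println(matches("[Feature:Builds] example test")) // true
	fmt.Println(matches("something else entirely"))       // false
}
```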
-source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -test_scripts=`find test/extended -maxdepth 1 -name "*.sh" -not \( -name "all.sh" \)` - -OVERALL_RETURN=0 -for test_script in $test_scripts; do - STARTTIME=$(date +%s) - echo "${STARTTIME} starting ${test_script}"; - - set +e - # use a subshell to prevent `exit` calls from killing this script - (${test_script}) - CURR_RETURN=$? - set -e - - if [ "${CURR_RETURN}" -ne "0" ]; then - OVERALL_RETURN=${CURR_RETURN} - fi - ENDTIME=$(date +%s); echo "${test_script} took $(($ENDTIME - $STARTTIME)) seconds and returned with ${CURR_RETURN}"; -done - -exit ${OVERALL_RETURN} diff --git a/test/extended/alternate_certs.sh b/test/extended/alternate_certs.sh deleted file mode 100755 index 8a39ec24abe3..000000000000 --- a/test/extended/alternate_certs.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -# -# This scripts starts the OpenShift server with custom TLS certs, and verifies generated kubeconfig files can be used to talk to it. -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -os::cleanup::tmpdir -os::util::environment::setup_all_server_vars - -function cleanup() { - return_code=$? - os::test::junit::generate_report - os::cleanup::all - os::util::describe_return_code "${return_code}" - exit "${return_code}" -} -trap "cleanup" EXIT - -os::log::info "Starting server as distinct processes" -os::log::info "`openshift version`" -os::log::info "Server logs will be at: ${LOG_DIR}/openshift.log" -os::log::info "Test artifacts will be in: ${ARTIFACT_DIR}" -os::log::info "Config dir is: ${SERVER_CONFIG_DIR}" - -mkdir -p ${LOG_DIR} - -os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " -ps -ef | grep openshift - -mkdir -p "${SERVER_CONFIG_DIR}" -pushd "${SERVER_CONFIG_DIR}" - -os::test::junit::declare_suite_start "extended/alternate_certs" - -# Make custom CA and server cert -os::cmd::expect_success 'oc adm ca create-signer-cert --overwrite=true --cert=master/custom-ca.crt --key=master/custom-ca.key --serial=master/custom-ca.txt --name=my-custom-ca@`date +%s`' -os::cmd::expect_success 'oc adm ca create-server-cert --cert=master/custom.crt --key=master/custom.key --hostnames=localhost,customhost.com --signer-cert=master/custom-ca.crt --signer-key=master/custom-ca.key --signer-serial=master/custom-ca.txt' - -# Create master/node configs -os::cmd::expect_success "openshift start --master=https://localhost:${API_PORT} --write-config=. --hostname=mynode --etcd-dir=./etcd --certificate-authority=master/custom-ca.crt" - -# Don't try this at home. We don't have flags for setting etcd ports in the config, but we want deconflicted ones. Use sed to replace defaults in a completely unsafe way -os::util::sed "s/:4001$/:${ETCD_PORT}/g" master/master-config.yaml -os::util::sed "s/:7001$/:${ETCD_PEER_PORT}/g" master/master-config.yaml -# replace top-level namedCertificates config -os::util::sed 's#^ namedCertificates: null# namedCertificates: [{"certFile":"custom.crt","keyFile":"custom.key","names":["localhost"]}]#' master/master-config.yaml - -# Start master -OPENSHIFT_ON_PANIC=crash openshift start master \ - --config=master/master-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/openshift.log" & -OS_PID=$! 
- -# Wait for the server to be up -os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig" - -# Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both -os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca' -os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer' - -# Verify kubeconfigs have connectivity to hosts serving with custom and generated certs -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin' - -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master' - -os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode' -os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode' - -os::test::junit::declare_suite_end - -kill $OS_PID - -popd diff --git a/test/extended/alternate_launches.sh b/test/extended/alternate_launches.sh deleted file mode 100755 index dbab43d6b49e..000000000000 --- a/test/extended/alternate_launches.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -# -# This scripts starts the OpenShift server with a default configuration. -# The OpenShift Docker registry and router are installed. -# It will run all tests that are imported into test/extended. -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -os::util::environment::use_sudo -os::cleanup::tmpdir -os::util::environment::setup_all_server_vars - -function cleanup() { - return_code=$? 
- os::test::junit::generate_report - os::cleanup::all - os::util::describe_return_code "${return_code}" - exit "${return_code}" -} -trap "cleanup" EXIT - -os::log::info "Starting server as distinct processes" -os::util::ensure::iptables_privileges_exist -os::start::configure_server - -os::log::info "`openshift version`" -os::log::info "Server logs will be at: ${LOG_DIR}/openshift.log" -os::log::info "Test artifacts will be in: ${ARTIFACT_DIR}" -os::log::info "Volumes dir is: ${VOLUME_DIR}" -os::log::info "Config dir is: ${SERVER_CONFIG_DIR}" -os::log::info "Using images: ${USE_IMAGES}" -os::log::info "MasterIP is: ${MASTER_ADDR}" - -mkdir -p ${LOG_DIR} - -os::log::info "Scan of OpenShift related processes already up via ps -ef | grep openshift : " -ps -ef | grep openshift - -os::test::junit::declare_suite_start "extended/alternate_launches" - -os::log::info "Starting etcdserver" -sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start etcd \ - --config=${MASTER_CONFIG_DIR}/master-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-etcdserver.log" & - -os::log::info "Starting api server" -sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start master api \ - --config=${MASTER_CONFIG_DIR}/master-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-apiserver.log" & - -os::cmd::try_until_text "oc get --raw /healthz --as system:unauthenticated --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 80 * second )) 0.25 -os::cmd::try_until_text "oc get --raw /healthz/ready --as system:unauthenticated --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' $(( 80 * second )) 0.25 -os::log::info "OpenShift API server up at: " -date - -# test alternate node level launches -os::log::info "Testing alternate node configurations" - -# proxy only -sudo env "PATH=${PATH}" TEST_CALL=1 OPENSHIFT_ON_PANIC=crash openshift-sdn --enable=proxy \ - --config=${NODE_CONFIG_DIR}/node-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-network-1.log" & -OS_PID=$! -os::cmd::try_until_text 'cat ${LOG_DIR}/os-network-1.log' 'syncProxyRules took' -pgrep -P "${OS_PID}" | xargs -r sudo kill -os::cmd::expect_success_and_text 'cat ${LOG_DIR}/os-network-1.log' 'Starting node networking' -os::cmd::expect_success_and_text 'cat ${LOG_DIR}/os-network-1.log' 'Started Kubernetes Proxy on' - -# proxy only -sudo env "PATH=${PATH}" TEST_CALL=1 OPENSHIFT_ON_PANIC=crash openshift-sdn --enable=proxy \ - --config=${NODE_CONFIG_DIR}/node-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-node-1.log" & -OS_PID=$! -os::cmd::try_until_text 'cat ${LOG_DIR}/os-node-1.log' 'syncProxyRules took' -pgrep -P "${OS_PID}" | xargs -r sudo kill -os::cmd::expect_success_and_text 'cat ${LOG_DIR}/os-node-1.log' 'Starting node networking' -os::cmd::expect_success_and_text 'cat ${LOG_DIR}/os-node-1.log' 'Started Kubernetes Proxy on' - -# plugins only -sudo env "PATH=${PATH}" TEST_CALL=1 OPENSHIFT_ON_PANIC=crash openshift-sdn --enable=plugins \ - --config=${NODE_CONFIG_DIR}/node-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-network-2.log" & -OS_PID=$! 
-os::cmd::try_until_text 'cat ${LOG_DIR}/os-network-2.log' 'Connecting to API server' -pgrep -P "${OS_PID}" | xargs -r sudo kill -os::cmd::expect_success_and_text 'cat ${LOG_DIR}/os-network-2.log' 'Starting node networking' -os::cmd::expect_success_and_not_text 'cat ${LOG_DIR}/os-network-2.log' 'Started Kubernetes Proxy on' - -os::log::info "Starting controllers" -sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift start master controllers \ - --config=${MASTER_CONFIG_DIR}/master-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-controllers.log" & - -os::log::info "Starting kubelet" -sudo env "PATH=${PATH}" hyperkube kubelet \ - $( openshift-node-config --config=${NODE_CONFIG_DIR}/node-config.yaml --loglevel=4 ) \ -&>"${LOG_DIR}/os-node.log" & -export OS_PID=$! - -os::log::info "Starting network" -sudo env "PATH=${PATH}" OPENSHIFT_ON_PANIC=crash openshift-sdn \ - --config=${NODE_CONFIG_DIR}/node-config.yaml \ - --loglevel=4 \ -&>"${LOG_DIR}/os-network.log" & - -os::log::info "OpenShift server start at: " -date - -os::cmd::try_until_text "oc get --raw ${KUBELET_SCHEME}://${KUBELET_HOST}:${KUBELET_PORT}/healthz --as system:unauthenticated --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" 'ok' minute 0.5 -os::cmd::try_until_success "oc get --raw /api/v1/nodes/${KUBELET_HOST} --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'" $(( 80 * second )) 0.25 -os::log::info "OpenShift node health checks done at: " -date - -# set our default KUBECONFIG location -export KUBECONFIG="${ADMIN_KUBECONFIG}" - -# TODO this is copy/paste from hack/test-end-to-end.sh. We need to DRY -if [[ -n "${USE_IMAGES:-}" ]]; then - readonly JQSETPULLPOLICY='(.items[] | select(.kind == "DeploymentConfig") | .spec.template.spec.containers[0].imagePullPolicy) |= "IfNotPresent"' - os::cmd::expect_success "oc adm registry --dry-run -o json --images='$USE_IMAGES' | jq '$JQSETPULLPOLICY' | oc create -f -" -else - os::cmd::expect_success "oc adm registry" -fi -os::cmd::expect_success 'oc adm policy add-scc-to-user hostnetwork -z router' -os::cmd::expect_success 'oc adm router' - -os::test::junit::declare_suite_end - -${OS_ROOT}/test/end-to-end/core.sh diff --git a/test/extended/builds/labels.go b/test/extended/builds/labels.go index fc767b282389..4f24343a9e17 100644 --- a/test/extended/builds/labels.go +++ b/test/extended/builds/labels.go @@ -2,6 +2,7 @@ package builds import ( "fmt" + "path/filepath" g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" @@ -43,7 +44,7 @@ var _ = g.Describe("[Feature:Builds][Slow][Smoke] result image should have prope }) g.Describe("S2I build from a template", func() { - g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", stiBuildFixture), func() { + g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", filepath.Base(stiBuildFixture)), func() { g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture)) err := oc.Run("create").Args("-f", imageStreamFixture).Execute() @@ -71,7 +72,7 @@ var _ = g.Describe("[Feature:Builds][Slow][Smoke] result image should have prope }) g.Describe("Docker build from a template", func() { - g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", dockerBuildFixture), func() { + g.It(fmt.Sprintf("should create a image from %q template with proper Docker labels", filepath.Base(dockerBuildFixture)), func() { g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture)) err := oc.Run("create").Args("-f", imageStreamFixture).Execute() diff --git 
a/test/extended/builds/s2i_env.go b/test/extended/builds/s2i_env.go index 877bee1f6c88..74165b859145 100644 --- a/test/extended/builds/s2i_env.go +++ b/test/extended/builds/s2i_env.go @@ -2,6 +2,7 @@ package builds import ( "fmt" + "path/filepath" "strings" e2e "k8s.io/kubernetes/test/e2e/framework" @@ -49,7 +50,7 @@ var _ = g.Describe("[Feature:Builds][Slow] s2i build with environment file in so }) g.Describe("Building from a template", func() { - g.It(fmt.Sprintf("should create a image from %q template and run it in a pod", stiEnvBuildFixture), func() { + g.It(fmt.Sprintf("should create a image from %q template and run it in a pod", filepath.Base(stiEnvBuildFixture)), func() { g.By(fmt.Sprintf("calling oc create -f %q", imageStreamFixture)) err := oc.Run("create").Args("-f", imageStreamFixture).Execute() diff --git a/test/extended/builds/s2i_incremental.go b/test/extended/builds/s2i_incremental.go index d77c4cd76721..e86e18cfedc5 100644 --- a/test/extended/builds/s2i_incremental.go +++ b/test/extended/builds/s2i_incremental.go @@ -2,6 +2,7 @@ package builds import ( "fmt" + "path/filepath" "strings" g "github.com/onsi/ginkgo" @@ -49,7 +50,7 @@ var _ = g.Describe("[Feature:Builds][Slow] incremental s2i build", func() { }) g.Describe("Building from a template", func() { - g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() { + g.It(fmt.Sprintf("should create a build from %q template and run it", filepath.Base(templateFixture)), func() { g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture)) err := oc.Run("new-app").Args("-f", templateFixture).Execute() diff --git a/test/extended/cni_vendor_test.sh b/test/extended/cni_vendor_test.sh deleted file mode 100755 index 553326d59d71..000000000000 --- a/test/extended/cni_vendor_test.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -# Set this to false if the plugin does not implement NetworkPolicy: -export NETWORKING_E2E_NETWORKPOLICY="${NETWORKING_E2E_NETWORKPOLICY:-true}" - -# Set this to true if the plugin implements isolation in the same manner as -# redhat/openshift-ovs-multitenant: -export NETWORKING_E2E_ISOLATION="${NETWORKING_E2E_ISOLATION:-false}" - -export NETWORKING_E2E_FOCUS="${NETWORKING_E2E_FOCUS:-\[Area:Networking\]}" -export NETWORKING_E2E_EXTERNAL=1 - -# Checking for a given kubeconfig -os::log::info "Starting 'networking' extended tests for cni plugin" -if [[ -n "${OPENSHIFT_TEST_KUBECONFIG:-}" ]]; then - # Run tests against an existing cluster - "${OS_ROOT}/test/extended/networking.sh" $@ -else - os::log::error "Please set env OPENSHIFT_TEST_KUBECONFIG to run the tests against an existing cluster" - exit 1 -fi diff --git a/test/extended/compatibility.sh b/test/extended/compatibility.sh deleted file mode 100755 index 64fdc8c582cb..000000000000 --- a/test/extended/compatibility.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# -# Runs extended compatibility tests with a previous version -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" -source "${OS_ROOT}/test/extended/setup.sh" - -# Previous version to test against -PREVIOUS_VERSION="v1.3.0" - -export API_SERVER_VERSION="${RUN_PREVIOUS_API:+${PREVIOUS_VERSION}}" -export CONTROLLER_VERSION="${RUN_PREVIOUS_CONTROLLER:+${PREVIOUS_VERSION}}" - -# For now, compatibility tests will not require a node -# so tests can execute quicker -export SKIP_NODE=1 - -os::test::extended::setup -os::test::extended::focus "$@" - - -os::log::info "Running compatibility tests" 
-FOCUS="\[Compatibility\]" SKIP="${SKIP_TESTS:-}" TEST_REPORT_FILE_NAME=compatibility os::test::extended::run -- -test.timeout 2h diff --git a/test/extended/conformance.sh b/test/extended/conformance.sh deleted file mode 100755 index fb183fba7bc1..000000000000 --- a/test/extended/conformance.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# -# Runs the conformance extended tests for OpenShift -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" -source "${OS_ROOT}/test/extended/setup.sh" - -os::test::extended::setup -os::test::extended::focus "$@" - -exitstatus=0 - -# run parallel tests -os::log::info "Running parallel tests N=${PARALLEL_NODES:-}" -SUITE=openshift/conformance/parallel/minimal TEST_PARALLEL="${PARALLEL_NODES:-5}" TEST_REPORT_FILE_NAME=conformance_parallel os::test::extended::run -- -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - -# run tests in serial -os::log::info "Running serial tests" -SUITE=openshift/conformance/serial/minimal TEST_REPORT_FILE_NAME=conformance_serial os::test::extended::run -- -test.timeout 2h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - -exit $exitstatus diff --git a/test/extended/core.sh b/test/extended/core.sh deleted file mode 100755 index d9095c8ffbc8..000000000000 --- a/test/extended/core.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# -# Runs all standard extended tests against either an existing cluster (TEST_ONLY=1) -# or a standard started server. -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" -source "${OS_ROOT}/test/extended/setup.sh" - -os::test::extended::setup -os::test::extended::focus "$@" - -exitstatus=0 - -# run parallel tests -os::log::info "Running parallel tests N=${PARALLEL_NODES:-}" -SUITE=openshift/conformance/parallel/minimal TEST_PARALLEL="${PARALLEL_NODES:-5}" TEST_REPORT_FILE_NAME=core_parallel os::test::extended::run -- -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - -# run tests in serial -os::log::info "" -os::log::info "Running serial tests" -SUITE=openshift/conformance/serial/minimal TEST_REPORT_FILE_NAME=core_serial os::test::extended::run -- -test.timeout 2h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - -exit $exitstatus diff --git a/test/extended/gssapi.sh b/test/extended/gssapi.sh deleted file mode 100755 index 4ebdf670c071..000000000000 --- a/test/extended/gssapi.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env bash -# -# Extended tests for logging in using GSSAPI -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -project_name='gssapiproxy' -test_name="test-extended/${project_name}" - -os::build::setup_env - -os::util::environment::use_sudo -os::util::environment::setup_time_vars -os::cleanup::tmpdir -os::util::environment::setup_all_server_vars - -os::log::system::start - -os::util::ensure::iptables_privileges_exist - -# Always keep containers' raw output for simplicity -junit_gssapi_output="${LOG_DIR}/raw_test_output_gssapi.log" - -os::test::junit::declare_suite_start "${test_name}" - -os::cmd::expect_success_and_text 'oc version' 'GSSAPI Kerberos SPNEGO' - -function cleanup() { - return_code=$? 
- os::test::junit::generate_report - os::cleanup::all - os::util::describe_return_code "${return_code}" - exit "${return_code}" -} -trap "cleanup" EXIT - -os::start::configure_server - -# set up env vars -cp -R test/extended/testdata/gssapi "${BASETMPDIR}" -test_data_location="${BASETMPDIR}/gssapi" - -host='gssapiproxy-server.gssapiproxy.svc.cluster.local' -realm="${host^^}" -backend='https://openshift.default.svc.cluster.local:443' - -oauth_patch="$(sed "s/HOST_NAME/${host}/" "${test_data_location}/config/oauth_config.json")" -final_oauth_patch="{"$(echo ${oauth_patch} | tail -n +3 | head -n -2 | sed s/\\n//g)"}" -cp "${SERVER_CONFIG_DIR}/master/master-config.yaml" "${SERVER_CONFIG_DIR}/master/master-config.tmp.yaml" -oc patch -f "${SERVER_CONFIG_DIR}/master/master-config.tmp.yaml" --local --type=json -o yaml --patch="[{\"op\": \"replace\", \"path\": \"/oauthConfig/identityProviders\", \"value\": ${final_oauth_patch}}]" > "${SERVER_CONFIG_DIR}/master/master-config.yaml" -os::start::server - -export KUBECONFIG="${ADMIN_KUBECONFIG}" - -os::start::registry -os::cmd::expect_success 'oc rollout status dc/docker-registry' - -os::cmd::expect_success 'oc login -u system:admin' -os::cmd::expect_success "oc new-project ${project_name}" -os::cmd::expect_success "oc adm policy add-scc-to-user anyuid -z default -n ${project_name}" - -# create all the resources we need -os::cmd::expect_success "oc create -f '${test_data_location}/proxy'" - -# kick off a build and wait for it to finish -os::cmd::expect_success "oc set env dc/gssapiproxy-server HOST='${host}' REALM='${realm}' BACKEND='${backend}'" -os::cmd::expect_success "oc start-build --from-dir='${test_data_location}/proxy' --follow --wait gssapiproxy" - -os_images=(fedora ubuntu) - -for os_image in "${os_images[@]}"; do - - pushd "${test_data_location}/${os_image}" > /dev/null - - pushd base > /dev/null - os::cmd::expect_success "cp '$(which oc)' ." - os::cmd::expect_success "cp -R '${OS_ROOT}/hack' ." - os::cmd::expect_success 'cp ../../scripts/test-wrapper.sh .' - os::cmd::expect_success 'cp ../../scripts/gssapi-tests.sh .' - os::cmd::expect_success 'cp ../../config/kubeconfig .' - os::cmd::expect_success "docker build --build-arg REALM='${realm}' --build-arg HOST='${host}' -t 'docker.io/${project_name}/${os_image}-gssapi-base:latest' ." - popd > /dev/null - - pushd kerberos > /dev/null - os::cmd::expect_success "docker build -t 'docker.io/${project_name}/${os_image}-gssapi-kerberos:latest' ." - popd > /dev/null - - pushd kerberos_configured > /dev/null - os::cmd::expect_success "docker build -t 'docker.io/${project_name}/${os_image}-gssapi-kerberos-configured:latest' ." 
- popd > /dev/null - - popd > /dev/null - -done - -function update_auth_proxy_config() { - local server_config="${1}" - local spec='{.items[0].spec.containers[0].env[?(@.name=="SERVER")].value}' - spec+='_' - spec+='{.items[0].status.conditions[?(@.type=="Ready")].status}' - - os::cmd::expect_success "oc set env dc/gssapiproxy-server SERVER='${server_config}'" - os::cmd::try_until_text "oc get pods -l deploymentconfig=gssapiproxy-server -o jsonpath='${spec}'" "^${server_config}_True$" $(( 10 * minute )) -} - -function run_gssapi_tests() { - local image_name="${1}" - local server_config="${2}" - local container_exit_code_jsonpath='{.status.containerStatuses[0].state.terminated.exitCode}' - local pod_log_location="${LOG_DIR}/${image_name}-${server_config}.log" - oc run "${image_name}" \ - --image="docker.io/${project_name}/${image_name}" \ - --generator=run-pod/v1 --restart=Never --attach \ - --env=SERVER="${server_config}" \ - 1> "${pod_log_location}" \ - 2>> "${junit_gssapi_output}" - # Lots of checks to really make sure that the tests ran successfully - os::cmd::expect_success_and_text "cat ${pod_log_location}" 'SUCCESS' - os::cmd::expect_success_and_not_text "cat ${pod_log_location}" 'FAILURE' - os::cmd::expect_success_and_text "cat ${pod_log_location}" "Finished running test-extended/gssapiproxy-tests/${image_name}-CLIENT_[[:upper:]_]+-${server_config}$" - os::cmd::try_until_text "oc get pod '${image_name}' -o jsonpath='${container_exit_code_jsonpath}'" '0' # kubelet takes time to update status - os::cmd::expect_success "oc delete pod '${image_name}'" -} - -for server_config in SERVER_GSSAPI_ONLY SERVER_GSSAPI_BASIC_FALLBACK; do - - update_auth_proxy_config "${server_config}" - - for os_image in "${os_images[@]}"; do - - run_gssapi_tests "${os_image}-gssapi-base" "${server_config}" - - run_gssapi_tests "${os_image}-gssapi-kerberos" "${server_config}" - - run_gssapi_tests "${os_image}-gssapi-kerberos-configured" "${server_config}" - - done - -done - -os::test::junit::declare_suite_end diff --git a/test/extended/extended_test.go b/test/extended/include.go similarity index 88% rename from test/extended/extended_test.go rename to test/extended/include.go index db8ae135055f..c32cbc561793 100644 --- a/test/extended/extended_test.go +++ b/test/extended/include.go @@ -1,8 +1,6 @@ package extended import ( - "testing" - _ "k8s.io/kubernetes/test/e2e" // test sources @@ -13,6 +11,7 @@ import ( _ "k8s.io/kubernetes/test/e2e/common" _ "k8s.io/kubernetes/test/e2e/instrumentation" _ "k8s.io/kubernetes/test/e2e/kubectl" + // _ "k8s.io/kubernetes/test/e2e/lifecycle" // _ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap" // _ "k8s.io/kubernetes/test/e2e/multicluster" @@ -22,6 +21,7 @@ import ( _ "k8s.io/kubernetes/test/e2e/scheduling" _ "k8s.io/kubernetes/test/e2e/servicecatalog" _ "k8s.io/kubernetes/test/e2e/storage" + // _ "k8s.io/kubernetes/test/e2e/ui" _ "github.com/openshift/origin/test/extended/builds" @@ -37,19 +37,9 @@ import ( _ "github.com/openshift/origin/test/extended/jobs" _ "github.com/openshift/origin/test/extended/localquota" _ "github.com/openshift/origin/test/extended/networking" + _ "github.com/openshift/origin/test/extended/operators" _ "github.com/openshift/origin/test/extended/prometheus" _ "github.com/openshift/origin/test/extended/router" _ "github.com/openshift/origin/test/extended/security" _ "github.com/openshift/origin/test/extended/templates" - - exutil "github.com/openshift/origin/test/extended/util" ) - -// init initialize the extended testing suite. 
-func init() { - exutil.InitTest() -} - -func TestExtended(t *testing.T) { - exutil.ExecuteTest(t, "Extended") -} diff --git a/test/extended/ldap_groups.sh b/test/extended/ldap_groups.sh deleted file mode 100755 index a12c53aa6a75..000000000000 --- a/test/extended/ldap_groups.sh +++ /dev/null @@ -1,265 +0,0 @@ -#!/bin/bash -# -# This scripts starts the OpenShift server with a default configuration. -# The OpenShift Docker registry and router are installed. -# It will run all tests that are imported into test/extended. -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" -os::util::environment::setup_time_vars - -os::build::setup_env - -function cleanup() { - return_code=$? - os::test::junit::generate_report - os::cleanup::all - os::util::describe_return_code "${return_code}" - exit "${return_code}" -} -trap "cleanup" EXIT - -os::log::info "Starting server" - -os::util::ensure::iptables_privileges_exist -os::util::environment::use_sudo -os::cleanup::tmpdir -os::util::environment::setup_all_server_vars - -os::log::system::start - -os::start::configure_server -os::start::server - -export KUBECONFIG="${ADMIN_KUBECONFIG}" - -os::start::registry -oc rollout status dc/docker-registry - -oc login ${MASTER_ADDR} -u ldap -p password --certificate-authority=${MASTER_CONFIG_DIR}/ca.crt -oc new-project openldap - -# create all the resources we need -oc create -f test/extended/testdata/ldap - -is_event_template=( \ -"{{with \$tags := .status.tags}}" \ - "{{range \$tag := \$tags}}" \ - "{{\$tag.tag}} " \ - "{{end}}" \ -"{{end}}" \ -) -is_event_template=$(IFS=""; echo "${is_event_template[*]}") # re-formats template for use - -os::test::junit::declare_suite_start "extended/ldap-groups/setup" -# wait until the last event that occurred on the imagestream was the successful pull of the latest image -os::cmd::try_until_text "oc get imagestream openldap --template='${is_event_template}'" 'latest' "$((60*TIME_SEC))" - -# kick off a build and wait for it to finish -oc start-build openldap --follow --wait \ - --from-dir=$(dirname "${BASH_SOURCE}")/../../images/openldap - -server_ready_template=( \ -"{{with \$items := .items}}" \ - "{{with \$item := index \$items 0}}" \ - "{{range \$map := \$item.status.conditions}}" \ - "{{with \$state := index \$map \"type\"}}" \ - "{{\$state}}" \ - "{{end}}" \ - "{{with \$valid := index \$map \"status\"}}" \ - "{{\$valid}} " \ - "{{end}}" \ - "{{end}}" \ - "{{end}}" \ -"{{end}}" \ -) -server_ready_template=$(IFS=$""; echo "${server_ready_template[*]}") # re-formats template for use - -# wait for LDAP server to be ready -os::cmd::try_until_text "oc get pods -l deploymentconfig=openldap-server --template='${server_ready_template}'" "ReadyTrue " "$((60*TIME_SEC))" - -oc login -u system:admin -n openldap -os::test::junit::declare_suite_end - -LDAP_SERVICE_IP=$(oc get --template="{{ .spec.clusterIP }}" service openldap-server) - -function compare_and_cleanup() { - validation_file=$1 - actual_file=actual-${validation_file} - rm -f ${WORKINGDIR}/${actual_file} - oc get groups --no-headers | awk '{print $1}' | sort | xargs -I{} oc get --export group {} -o yaml >> ${WORKINGDIR}/${actual_file} - os::util::sed '/sync-time/d' ${WORKINGDIR}/${actual_file} - diff ${validation_file} ${WORKINGDIR}/${actual_file} - oc delete groups --all - echo -e "\tSUCCESS" -} - -oc login -u system:admin -n default - -os::log::info "Running extended tests" - -schema=('rfc2307' 'ad' 'augmented-ad') - -for (( i=0; i<${#schema[@]}; i++ )); do - current_schema=${schema[$i]} - os::log::info "Testing 
schema: ${current_schema}" - os::test::junit::declare_suite_start "extended/ldap-groups/${current_schema}" - - WORKINGDIR=${BASETMPDIR}/${current_schema} - mkdir ${WORKINGDIR} - - # create a temp copy of the test files - cp test/extended/authentication/ldap/${current_schema}/* ${WORKINGDIR} - pushd ${WORKINGDIR} > /dev/null - - # load OpenShift and LDAP group UIDs, needed for literal whitelists - # use awk instead of sed for compatibility (see os::util::sed) - group1_ldapuid=$(awk 'NR == 1 {print $0}' ldapgroupuids.txt) - group2_ldapuid=$(awk 'NR == 2 {print $0}' ldapgroupuids.txt) - group3_ldapuid=$(awk 'NR == 3 {print $0}' ldapgroupuids.txt) - - group1_osuid=$(awk 'NR == 1 {print $0}' osgroupuids.txt) - group2_osuid=$(awk 'NR == 2 {print $0}' osgroupuids.txt) - group3_osuid=$(awk 'NR == 3 {print $0}' osgroupuids.txt) - - # update sync-configs and validation files with the LDAP server's IP - config_files=sync-config*.yaml - validation_files=valid*.yaml - for config in ${config_files} ${validation_files} - do - os::util::sed "s/LDAP_SERVICE_IP/${LDAP_SERVICE_IP}/g" ${config} - done - - echo -e "\tTEST: Sync all LDAP groups from LDAP server" - oc adm groups sync --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_all_ldap_sync.yaml - - - # WHITELISTS - echo -e "\tTEST: Sync subset of LDAP groups from LDAP server using whitelist file" - oc adm groups sync --whitelist=whitelist_ldap.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_sync.yaml - - echo -e "\tTEST: Sync subset of LDAP groups from LDAP server using literal whitelist" - oc adm groups sync ${group1_ldapuid} --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_sync.yaml - - echo -e "\tTEST: Sync subset of LDAP groups from LDAP server using union of literal whitelist and whitelist file" - oc adm groups sync ${group2_ldapuid} --whitelist=whitelist_ldap.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_union_sync.yaml - - echo -e "\tTEST: Sync subset of OpenShift groups from LDAP server using whitelist file" - oc adm groups sync ${group1_ldapuid} --sync-config=sync-config.yaml --confirm - oc patch group ${group1_osuid} -p 'users: []' - oc adm groups sync --type=openshift --whitelist=whitelist_openshift.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_sync.yaml - - echo -e "\tTEST: Sync subset of OpenShift groups from LDAP server using literal whitelist" - # sync group from LDAP - oc adm groups sync ${group1_ldapuid} --sync-config=sync-config.yaml --confirm - oc patch group ${group1_osuid} -p 'users: []' - oc adm groups sync --type=openshift ${group1_osuid} --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_sync.yaml - - echo -e "\tTEST: Sync subset of OpenShift groups from LDAP server using union of literal whitelist and whitelist file" - # sync groups from LDAP - oc adm groups sync ${group1_ldapuid} ${group2_ldapuid} --sync-config=sync-config.yaml --confirm - oc patch group ${group1_osuid} -p 'users: []' - oc patch group ${group2_osuid} -p 'users: []' - oc adm groups sync --type=openshift group/${group2_osuid} --whitelist=whitelist_openshift.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_whitelist_union_sync.yaml - - - # BLACKLISTS - echo -e "\tTEST: Sync subset of LDAP groups from LDAP server using whitelist and blacklist file" - # oc adm groups sync --whitelist=ldapgroupuids.txt --blacklist=blacklist_ldap.txt 
--blacklist-group="${group1_ldapuid}" --sync-config=sync-config.yaml --confirm - oc adm groups sync --whitelist=ldapgroupuids.txt --blacklist=blacklist_ldap.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_all_blacklist_sync.yaml - - echo -e "\tTEST: Sync subset of LDAP groups from LDAP server using blacklist" - # oc adm groups sync --blacklist=blacklist_ldap.txt --blacklist-group=${group1_ldapuid} --sync-config=sync-config.yaml --confirm - oc adm groups sync --blacklist=blacklist_ldap.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_all_blacklist_sync.yaml - - echo -e "\tTEST: Sync subset of OpenShift groups from LDAP server using whitelist and blacklist file" - oc adm groups sync --sync-config=sync-config.yaml --confirm - oc get group -o name --no-headers | xargs -n 1 oc patch -p 'users: []' - # oc adm groups sync --type=openshift --whitelist=osgroupuids.txt --blacklist=blacklist_openshift.txt --blacklist-group=${group1_osuid} --sync-config=sync-config.yaml --confirm - oc adm groups sync --type=openshift --whitelist=osgroupuids.txt --blacklist=blacklist_openshift.txt --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_all_openshift_blacklist_sync.yaml - - - # MAPPINGS - echo -e "\tTEST: Sync all LDAP groups from LDAP server using a user-defined mapping" - oc adm groups sync --sync-config=sync-config-user-defined.yaml --confirm - compare_and_cleanup valid_all_ldap_sync_user_defined.yaml - - echo -e "\tTEST: Sync all LDAP groups from LDAP server using a partially user-defined mapping" - oc adm groups sync --sync-config=sync-config-partially-user-defined.yaml --confirm - compare_and_cleanup valid_all_ldap_sync_partially_user_defined.yaml - - echo -e "\tTEST: Sync based on OpenShift groups respecting OpenShift mappings" - oc adm groups sync --sync-config=sync-config-user-defined.yaml --confirm - oc get group -o name --no-headers | xargs -n 1 oc patch -p 'users: []' - oc adm groups sync --type=openshift --sync-config=sync-config.yaml --confirm - compare_and_cleanup valid_all_ldap_sync_user_defined.yaml - - echo -e "\tTEST: Sync all LDAP groups from LDAP server using DN as attribute whenever possible" - oc adm groups sync --sync-config=sync-config-dn-everywhere.yaml --confirm - compare_and_cleanup valid_all_ldap_sync_dn_everywhere.yaml - - echo -e "\tTEST: Sync based on OpenShift groups respecting OpenShift mappings and whitelist file" - os::cmd::expect_success_and_text 'oc adm groups sync --whitelist=ldapgroupuids.txt --sync-config=sync-config-user-defined.yaml --confirm' 'group/' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name}' 'firstgroup secondgroup thirdgroup' - os::cmd::expect_success_and_text 'oc adm groups sync --type=openshift --whitelist=ldapgroupuids.txt --sync-config=sync-config-user-defined.yaml --confirm' 'group/' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name}' 'firstgroup secondgroup thirdgroup' - os::cmd::expect_success_and_text 'oc delete groups --all' 'deleted' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name} | wc -l' '0' - - - # PRUNING - echo -e "\tTEST: Sync all LDAP groups from LDAP server, change LDAP UID, then prune OpenShift groups" - oc adm groups sync --sync-config=sync-config.yaml --confirm - oc patch group ${group2_osuid} -p "{\"metadata\":{\"annotations\":{\"openshift.io/ldap.uid\":\"cn=garbage,${group2_ldapuid}\"}}}" - oc adm groups prune --sync-config=sync-config.yaml --confirm - 
compare_and_cleanup valid_all_ldap_sync_prune.yaml - - echo -e "\tTEST: Sync all LDAP groups from LDAP server using whitelist file, then prune OpenShift groups using the same whitelist file" - os::cmd::expect_success_and_text 'oc adm groups sync --whitelist=ldapgroupuids.txt --sync-config=sync-config-user-defined.yaml --confirm' 'group/' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name}' 'firstgroup secondgroup thirdgroup' - os::cmd::expect_success_and_text 'oc adm groups prune --whitelist=ldapgroupuids.txt --sync-config=sync-config-user-defined.yaml --confirm | wc -l' '0' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name}' 'firstgroup secondgroup thirdgroup' - os::cmd::expect_success_and_text 'oc patch group secondgroup -p "{\"metadata\":{\"annotations\":{\"openshift.io/ldap.uid\":\"cn=garbage\"}}}"' 'group.user.openshift.io/secondgroup patched' - os::cmd::expect_success_and_text 'oc adm groups prune --whitelist=ldapgroupuids.txt --sync-config=sync-config-user-defined.yaml --confirm' 'group/secondgroup' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name}' 'firstgroup thirdgroup' - os::cmd::expect_success_and_text 'oc delete groups --all' 'deleted' - os::cmd::expect_success_and_text 'oc get group -o jsonpath={.items[*].metadata.name} | wc -l' '0' - - - # PAGING - echo -e "\tTEST: Sync all LDAP groups from LDAP server using paged queries" - oc adm groups sync --sync-config=sync-config-paging.yaml --confirm - compare_and_cleanup valid_all_ldap_sync.yaml - - - os::test::junit::declare_suite_end - popd > /dev/null -done - -# special test for RFC2307 -pushd ${BASETMPDIR}/rfc2307 > /dev/null -echo -e "\tTEST: Sync groups from LDAP server, tolerating errors" -oc adm groups sync --sync-config=sync-config-tolerating.yaml --confirm 2>"${LOG_DIR}/tolerated-output.txt" -grep 'For group "cn=group1,ou=groups,ou=incomplete\-rfc2307,dc=example,dc=com", ignoring member "cn=INVALID,ou=people,ou=rfc2307,dc=example,dc=com"' "${LOG_DIR}/tolerated-output.txt" -grep 'For group "cn=group2,ou=groups,ou=incomplete\-rfc2307,dc=example,dc=com", ignoring member "cn=OUTOFSCOPE,ou=people,ou=OUTOFSCOPE,dc=example,dc=com"' "${LOG_DIR}/tolerated-output.txt" -grep 'For group "cn=group3,ou=groups,ou=incomplete\-rfc2307,dc=example,dc=com", ignoring member "cn=INVALID,ou=people,ou=rfc2307,dc=example,dc=com"' "${LOG_DIR}/tolerated-output.txt" -grep 'For group "cn=group3,ou=groups,ou=incomplete\-rfc2307,dc=example,dc=com", ignoring member "cn=OUTOFSCOPE,ou=people,ou=OUTOFSCOPE,dc=example,dc=com"' "${LOG_DIR}/tolerated-output.txt" -compare_and_cleanup valid_all_ldap_sync_tolerating.yaml -popd > /dev/null - -# special test for augmented-ad -pushd ${BASETMPDIR}/augmented-ad > /dev/null -echo -e "\tTEST: Sync all LDAP groups from LDAP server, remove LDAP group metadata entry, then prune OpenShift groups" -oc adm groups sync --sync-config=sync-config.yaml --confirm -ldapdelete -x -h $LDAP_SERVICE_IP -p 389 -D cn=Manager,dc=example,dc=com -w admin "${group1_ldapuid}" -oc adm groups prune --sync-config=sync-config.yaml --confirm -compare_and_cleanup valid_all_ldap_sync_delete_prune.yaml -popd > /dev/null diff --git a/test/extended/networking-minimal.sh b/test/extended/networking-minimal.sh deleted file mode 100755 index abe9e4a2b809..000000000000 --- a/test/extended/networking-minimal.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -# Wrapper to configure 
networking.sh to run a minimal set of tests. -NETWORKING_E2E_MINIMAL=1 "${OS_ROOT}/test/extended/networking.sh" diff --git a/test/extended/networking.sh b/test/extended/networking.sh deleted file mode 100755 index 8440ca3ccd43..000000000000 --- a/test/extended/networking.sh +++ /dev/null @@ -1,379 +0,0 @@ -#!/bin/bash - -# This script runs the networking e2e tests. See CONTRIBUTING.adoc for -# documentation. -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" - -if [[ -n "${OPENSHIFT_VERBOSE_OUTPUT:-}" ]]; then - set -o xtrace - export PS4='+ \D{%b %d %H:%M:%S} $(basename ${BASH_SOURCE}):${LINENO} ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' -fi - -# Ensure that subshells inherit bash settings (specifically xtrace) -export SHELLOPTS - -# These strings filter the available tests. -# -# The EmptyDir test is a canary; it will fail if mount propagation is -# not properly configured on the host. -NETWORKING_E2E_FOCUS="${NETWORKING_E2E_FOCUS:-Networking|Services|Feature:OSNetworkPolicy|EmptyDir volumes should support \(root,0644,tmpfs\)}" - -NETWORKING_E2E_MINIMAL="${NETWORKING_E2E_MINIMAL:-}" - -# Tests that are skipped when running networking-minimal.sh because they're slow -# and unlikely to be broken by changes outside of the SDN code. -MINIMAL_SKIP_LIST=( - "multicast" -) - -NETWORKING_E2E_EXTERNAL="${NETWORKING_E2E_EXTERNAL:-}" - -# Tests that are are openshift-sdn-specific, so shouldn't be run against external plugins -EXTERNAL_PLUGIN_SKIP_LIST=( - # Relies on an OpenShift-specific annotation, and is not a "required" feature for - # network plugins - "multicast" -) - -CLUSTER_CMD="${OS_ROOT}/hack/dind-cluster.sh" - -# Control variable to limit unnecessary cleanup -DIND_CLEANUP_REQUIRED=0 - -function copy-container-files() { - local source_path=$1 - local base_dest_dir=$2 - - for container_name in "${CONTAINER_NAMES[@]}"; do - local dest_dir="${base_dest_dir}/${container_name}" - if [[ ! -d "${dest_dir}" ]]; then - mkdir -p "${dest_dir}" - fi - sudo docker cp "${container_name}:${source_path}" "${dest_dir}" - done -} - -function save-container-logs() { - local base_dest_dir=$1 - local deployment_failed=${2:-} - - os::log::info "Saving container logs" - - local container_log_file="/tmp/systemd.log" - - for container_name in "${CONTAINER_NAMES[@]}"; do - local dest_dir="${base_dest_dir}/${container_name}" - if [[ ! -d "${dest_dir}" ]]; then - mkdir -p "${dest_dir}" - fi - sudo docker exec -t "${container_name}" bash -c "journalctl > ${container_log_file}" - sudo docker cp "${container_name}:${container_log_file}" "${dest_dir}" - if [[ -n "${deployment_failed}" ]]; then - # Output container logs to stdout to ensure that jenkins has - # detail to classify the failure cause. 
- local msg="System logs for container ${container_name}" - os::log::info "< ${msg} >" - os::log::info "***************************************************" - cat "${dest_dir}/$(basename "${container_log_file}")" - os::log::info "***************************************************" - os::log::info "" - fi - done -} - -function save-artifacts() { - local name=$1 - local config_root=$2 - - os::log::info "Saving cluster configuration" - - local dest_dir="${ARTIFACT_DIR}/${name}" - - local config_source="${config_root}/openshift.local.config" - local config_dest="${dest_dir}/openshift.local.config" - mkdir -p "${config_dest}" - cp -r ${config_source}/* ${config_dest}/ - - copy-container-files "/etc/hosts" "${dest_dir}" -} - -function deploy-cluster() { - local name=$1 - local plugin=$2 - local log_dir=$3 - - os::log::info "Launching a docker-in-docker cluster for the ${name} plugin" - export OPENSHIFT_CONFIG_ROOT="${BASETMPDIR}/${name}" - DIND_CLEANUP_REQUIRED=1 - - local exit_status=0 - if ! ${CLUSTER_CMD} start -r -n "${plugin}"; then - exit_status=1 - fi - - save-artifacts "${name}" "${OPENSHIFT_CONFIG_ROOT}" - - return "${exit_status}" -} - -function get-kubeconfig-from-root() { - local config_root=$1 - - echo "${config_root}/openshift.local.config/master/admin.kubeconfig" -} - -# Any non-zero exit code from any test run invoked by this script -# should increment TEST_FAILURE so the total count of failed test runs -# can be returned as the exit code. -TEST_FAILURES=0 -function test-osdn-plugin() { - local name=$1 - local plugin=$2 - - os::log::info "Targeting ${name} plugin: ${plugin}" - - local log_dir="${LOG_DIR}/${name}" - mkdir -p "${log_dir}" - - local deployment_failed= - local tests_failed= - - if deploy-cluster "${name}" "${plugin}" "${log_dir}"; then - os::log::info "Running networking e2e tests against the ${name} plugin" - export TEST_REPORT_FILE_NAME="${name}-junit" - - local kubeconfig="$(get-kubeconfig-from-root "${OPENSHIFT_CONFIG_ROOT}")" - if ! TEST_REPORT_FILE_NAME=networking_${name} \ - run-extended-tests "${kubeconfig}" "${log_dir}/test.log"; then - tests_failed=1 - os::log::error "e2e tests failed for plugin: ${plugin}" - fi - else - deployment_failed=1 - os::log::error "Failed to deploy cluster for plugin: {$name}" - fi - - # Record the failure before further errors can occur. - if [[ -n "${deployment_failed}" || -n "${tests_failed}" ]]; then - TEST_FAILURES=$((TEST_FAILURES + 1)) - fi - - # Output container logs to stdout if deployment fails - save-container-logs "${log_dir}" "${deployment_failed}" - - os::log::info "Shutting down docker-in-docker cluster for the ${name} plugin" - ${CLUSTER_CMD} stop - DIND_CLEANUP_REQUIRED=0 - rm -rf "${OPENSHIFT_CONFIG_ROOT}" -} - - -function join { local IFS="$1"; shift; echo "$*"; } - -function run-extended-tests() { - local kubeconfig=$1 - local log_path=${2:-} - local dlv_debug="${DLV_DEBUG:-}" - - local focus_regex="${NETWORKING_E2E_FOCUS}" - local skip_regex="" - - if [[ -n "${NETWORKING_E2E_MINIMAL}" ]]; then - skip_regex="$(join '|' "${MINIMAL_SKIP_LIST[@]}")" - elif [[ -n "${NETWORKING_E2E_EXTERNAL}" ]]; then - skip_regex="$(join '|' "${EXTERNAL_PLUGIN_SKIP_LIST[@]}")" - fi - - export KUBECONFIG="${kubeconfig}" - - local test_args="--test.v '--ginkgo.skip=${skip_regex}' \ -'--ginkgo.focus=${focus_regex}' ${TEST_EXTRA_ARGS}" - - # this ${FOCUS} value will override the $focus_regex in the same way that - # the --ginkgo.focus argument did previously. 
- if [[ -n "${FOCUS:-}" ]]; then - test_args="${test_args} --ginkgo.focus=${FOCUS}" - fi - - if [[ -n "${dlv_debug}" ]]; then - # run tests using delve debugger - local extended_test; extended_test="$( os::util::find::built_binary extended.test )" - local test_cmd="dlv exec ${extended_test} -- ${test_args}" - else - # run tests normally - local test_cmd="extended.test ${test_args}" - fi - - if [[ -n "${log_path}" ]]; then - if [[ -n "${dlv_debug}" ]]; then - os::log::warning "Not logging to file since DLV_DEBUG is enabled" - else - test_cmd="${test_cmd} | tee ${log_path}" - fi - fi - - pushd "${OS_ROOT}/test/extended/networking" > /dev/null - eval "${test_cmd}; "'exit_status=${PIPESTATUS[0]}' - popd > /dev/null - - return ${exit_status} -} - -CONFIG_ROOT="${OPENSHIFT_CONFIG_ROOT:-}" -case "${CONFIG_ROOT}" in - dev) - CONFIG_ROOT="${OS_ROOT}" - ;; - dind) - CONFIG_ROOT="/tmp/openshift-dind-cluster/\ -${OPENSHIFT_INSTANCE_PREFIX:-openshift}" - if [[ ! -d "${CONFIG_ROOT}" ]]; then - os::log::error "OPENSHIFT_CONFIG_ROOT=dind but dind cluster not found" - os::log::info "To launch a cluster: hack/dind-cluster.sh start" - exit 1 - fi - ;; - *) - if [[ -n "${CONFIG_ROOT}" ]]; then - CONFIG_FILE="${CONFIG_ROOT}/openshift.local.config/master/admin.kubeconfig" - if [[ ! -f "${CONFIG_FILE}" ]]; then - os::log::error "${CONFIG_FILE} not found" - exit 1 - fi - fi - ;; -esac - -TEST_EXTRA_ARGS="$@" - -if [[ "$@[@]" =~ "ginkgo.focus" ]]; then - os::log::fatal "the --ginkgo.focus flag is no longer supported, use FOCUS=foo instead." -fi - -if [[ -n "${OPENSHIFT_SKIP_BUILD:-}" ]] && - os::util::find::built_binary 'extended.test' >/dev/null 2>&1; then - os::log::warning "Skipping rebuild of test binary due to OPENSHIFT_SKIP_BUILD=1" -else - hack/build-go.sh test/extended/extended.test -fi - -# enable-selinux/disable-selinux use the shared control variable -# SELINUX_DISABLED to determine whether to re-enable selinux after it -# has been disabled. The goal is to allow temporary disablement of -# selinux enforcement while avoiding enabling enforcement in an -# environment where it is not already enabled. -SELINUX_DISABLED=0 - -function enable-selinux() { - if [ "${SELINUX_DISABLED}" = "1" ]; then - os::log::info "Re-enabling selinux enforcement" - sudo setenforce 1 - SELINUX_DISABLED=0 - fi -} - -function disable-selinux() { - if selinuxenabled && [ "$(getenforce)" = "Enforcing" ]; then - os::log::info "Temporarily disabling selinux enforcement" - sudo setenforce 0 - SELINUX_DISABLED=1 - fi -} - -function kernel-supports-networkpolicy() { - # There's really no good way to test this "correctly" if OVS isn't installed. - # The mainline kernel got support for OVS NAT support in 4.6. RHEL kernels have - # it in 3.10.0-514 and later. 
- version="$(uname -r)" - case "${version}" in - 3.10.0-*.el7.*) - build=$(sed -e 's/.*-\([0-9]*\)\..*/\1/' <<< "${version}") - if [[ "${build}" -lt 514 ]]; then - return 1 - fi - ;; - [0-3].*|4.[0-5].*) - return 1 - ;; - esac - return 0 -} - -os::log::info "Starting 'networking' extended tests" -if [[ -n "${CONFIG_ROOT}" ]]; then - KUBECONFIG="$(get-kubeconfig-from-root "${CONFIG_ROOT}")" - os::log::info "KUBECONFIG=${KUBECONFIG}" - run-extended-tests "${KUBECONFIG}" -elif [[ -n "${OPENSHIFT_TEST_KUBECONFIG:-}" ]]; then - os::log::info "KUBECONFIG=${OPENSHIFT_TEST_KUBECONFIG}" - # Run tests against an existing cluster - run-extended-tests "${OPENSHIFT_TEST_KUBECONFIG}" -else - # For each plugin, run tests against a test-managed cluster - - # Use a unique instance prefix to ensure the names of the test dind - # containers will not clash with the names of non-test containers. - export OPENSHIFT_CLUSTER_ID="nettest" - # TODO(marun) Discover these names instead of hard-coding - CONTAINER_NAMES=( - "${OPENSHIFT_CLUSTER_ID}-master" - "${OPENSHIFT_CLUSTER_ID}-node-1" - "${OPENSHIFT_CLUSTER_ID}-node-2" - ) - - os::cleanup::tmpdir - - # Allow setting $JUNIT_REPORT to toggle output behavior - if [[ -n "${JUNIT_REPORT:-}" ]]; then - # the Ginkgo tests also generate jUnit but expect different envars - export TEST_REPORT_DIR="${ARTIFACT_DIR}/junit" - mkdir -p $TEST_REPORT_DIR - fi - - os::log::system::start - - os::log::info "Building docker-in-docker images" - ${CLUSTER_CMD} build-images - - # Ensure cleanup on error - ENABLE_SELINUX=0 - function cleanup-dind { - local exit_code=$? - if [[ "${DIND_CLEANUP_REQUIRED}" = "1" ]]; then - os::log::info "Shutting down docker-in-docker cluster" - ${CLUSTER_CMD} stop || true - fi - enable-selinux || true - if [[ "${TEST_FAILURES}" = "0" ]]; then - os::log::info "No test failures were detected" - else - os::log::error "${TEST_FAILURES} plugin(s) failed one or more tests" - fi - # Return non-zero for either command or test failures - if [[ "${exit_code}" = "0" ]]; then - exit_code="${TEST_FAILURES}" - else - os::log::error "Exiting with code ${exit_code}" - fi - exit $exit_code - } - trap "exit" INT TERM - trap "cleanup-dind" EXIT - - # Docker-in-docker is not compatible with selinux - disable-selinux - - # Skip subnet and networkpolicy tests during a minimal test run - if [[ -z "${NETWORKING_E2E_MINIMAL}" ]]; then - # Ignore deployment errors for a given plugin to allow other plugins - # to be tested. 
- test-osdn-plugin "subnet" "redhat/openshift-ovs-subnet" || true - if kernel-supports-networkpolicy; then - test-osdn-plugin "networkpolicy" "redhat/openshift-ovs-networkpolicy" || true - else - os::log::warning "Skipping networkpolicy tests due to kernel version" - fi - fi - - test-osdn-plugin "multitenant" "redhat/openshift-ovs-multitenant" || true -fi diff --git a/test/extended/operators/operators.go b/test/extended/operators/operators.go new file mode 100644 index 000000000000..e32e09496f7f --- /dev/null +++ b/test/extended/operators/operators.go @@ -0,0 +1,208 @@ +package operators + +import ( + "bytes" + "fmt" + "strings" + "text/tabwriter" + "time" + + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + "github.com/stretchr/objx" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + coreclient "k8s.io/client-go/kubernetes/typed/core/v1" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + operatorWait = 1 * time.Minute + cvoWait = 5 * time.Minute +) + +var _ = g.Describe("[Feature:Platform][Smoke] Managed cluster should", func() { + defer g.GinkgoRecover() + + g.It("start all core operators", func() { + cfg, err := e2e.LoadConfig() + o.Expect(err).NotTo(o.HaveOccurred()) + c, err := e2e.LoadClientset() + o.Expect(err).NotTo(o.HaveOccurred()) + dc, err := dynamic.NewForConfig(cfg) + o.Expect(err).NotTo(o.HaveOccurred()) + + // presence of the CVO namespace gates this test + g.By("checking for the cluster version operator") + skipUnlessCVO(c.Core().Namespaces()) + + g.By("waiting for the cluster version to be applied") + cvc := dc.Resource(schema.GroupVersionResource{Group: "config.openshift.io", Resource: "clusterversions", Version: "v1"}) + var lastErr error + var lastCV objx.Map + if err := wait.PollImmediate(3*time.Second, cvoWait, func() (bool, error) { + obj, err := cvc.Get("version", metav1.GetOptions{}) + if err != nil { + lastErr = err + e2e.Logf("Unable to check for cluster version: %v", err) + return false, nil + } + cv := objx.Map(obj.UnstructuredContent()) + lastErr = nil + lastCV = cv + payload := cv.Get("status.current.payload").String() + if len(payload) == 0 { + e2e.Logf("ClusterVersion has no current payload version") + return false, nil + } + if cond := condition(cv, "Progressing"); cond.Get("status").String() != "False" { + e2e.Logf("ClusterVersion is still progressing: %s", cond.Get("message").String()) + return false, nil + } + if cond := condition(cv, "Available"); cond.Get("status").String() != "True" { + e2e.Logf("ClusterVersion is not available: %s", cond.Get("message").String()) + return false, nil + } + e2e.Logf("ClusterVersion available: %s", condition(cv, "Progressing").Get("message").String()) + return true, nil + }); err != nil { + o.Expect(lastErr).NotTo(o.HaveOccurred()) + e2e.Logf("Last cluster version seen: %s", lastCV) + if msg := condition(lastCV, "Failing").Get("message").String(); len(msg) > 0 { + e2e.Logf("ClusterVersion is reporting a failure: %s", msg) + } + e2e.Failf("ClusterVersion never became available: %s", condition(lastCV, "Progressing").Get("message").String()) + } + + // gate on all clusteroperators being ready + available := make(map[string]struct{}) + for _, group := range []string{"config.openshift.io", "operatorstatus.openshift.io"} { + g.By(fmt.Sprintf("waiting for all cluster operators in %s to be available", group)) + coc := 
dc.Resource(schema.GroupVersionResource{Group: group, Resource: "clusteroperators", Version: "v1"}) + lastErr = nil + var lastCOs []objx.Map + if err := wait.PollImmediate(time.Second, operatorWait, func() (bool, error) { + obj, err := coc.List(metav1.ListOptions{}) + if err != nil { + lastErr = err + e2e.Logf("Unable to check for cluster operators: %v", err) + return false, nil + } + cv := objx.Map(obj.UnstructuredContent()) + lastErr = nil + items := objects(cv.Get("items")) + lastCOs = items + + // TODO: make this an error condition once we know at least one cluster operator status is reported + if len(items) == 0 { + e2e.Logf("No cluster operators registered in %s", group) + return true, nil + } + + var unavailable []objx.Map + var unavailableNames []string + for _, co := range items { + if condition(co, "Available").Get("status").String() != "True" { + ns := co.Get("metadata.namespace").String() + name := co.Get("metadata.name").String() + unavailableNames = append(unavailableNames, fmt.Sprintf("%s/%s", ns, name)) + unavailable = append(unavailable, co) + break + } + } + if len(unavailable) > 0 { + e2e.Logf("Operators in group %s still unavailable: %s", group, strings.Join(unavailableNames, ", ")) + return false, nil + } + return true, nil + }); err != nil { + o.Expect(lastErr).NotTo(o.HaveOccurred()) + var unavailable []string + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, 0, 4, 1, ' ', 0) + fmt.Fprintf(w, "NAMESPACE\tNAME\tPROGRESSING\tAVAILABLE\tVERSION\tMESSAGE\n") + for _, co := range lastCOs { + ns := co.Get("metadata.namespace").String() + name := co.Get("metadata.name").String() + if condition(co, "Available").Get("status").String() != "True" { + unavailable = append(unavailable, fmt.Sprintf("%s/%s", ns, name)) + } else { + available[fmt.Sprintf("%s/%s", ns, name)] = struct{}{} + } + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + ns, + name, + condition(co, "Progressing").Get("status").String(), + condition(co, "Available").Get("status").String(), + co.Get("status.version").String(), + condition(co, "Failing").Get("message").String(), + ) + } + w.Flush() + e2e.Logf("ClusterOperators:\n%s", buf.String()) + // TODO: make this an e2e.Failf() + e2e.Logf("Some cluster operators never became available %s", strings.Join(unavailable, ", ")) + } + } + if _, ok := available["openshift-cluster-dns-operator/openshift-dns"]; !ok { + e2e.Failf("A required operator was not available") + } + }) +}) + +func skipUnlessCVO(c coreclient.NamespaceInterface) { + err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { + _, err := c.Get("openshift-cluster-version", metav1.GetOptions{}) + if err == nil { + return true, nil + } + if errors.IsNotFound(err) { + e2e.Skipf("The cluster is not managed by a cluster-version operator") + } + e2e.Logf("Unable to check for cluster version operator: %v", err) + return false, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) +} + +func contains(names []string, name string) bool { + for _, s := range names { + if s == name { + return true + } + } + return false +} + +func jsonString(from objx.Map) string { + s, _ := from.JSON() + return s +} + +func objects(from *objx.Value) []objx.Map { + var values []objx.Map + switch { + case from.IsObjxMapSlice(): + return from.ObjxMapSlice() + case from.IsInterSlice(): + for _, i := range from.InterSlice() { + if msi, ok := i.(map[string]interface{}); ok { + values = append(values, objx.Map(msi)) + } + } + } + return values +} + +func condition(cv objx.Map, condition string) objx.Map { + for _, obj := 
range objects(cv.Get("status.conditions")) { + if obj.Get("type").String() == condition { + return obj + } + } + return objx.Map(nil) +} diff --git a/test/extended/prevapicontroller.sh b/test/extended/prevapicontroller.sh deleted file mode 100755 index 40bc6625851b..000000000000 --- a/test/extended/prevapicontroller.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# -# Runs compatibility tests with a previous controller and API server version -RUN_PREVIOUS_CONTROLLER=1 RUN_PREVIOUS_API=1 SKIP_TESTS="\[SkipPrevAPIAndControllers\]" \ - "$(dirname "${BASH_SOURCE}")/compatibility.sh" diff --git a/test/extended/prevcontroller.sh b/test/extended/prevcontroller.sh deleted file mode 100755 index cbb07be9e122..000000000000 --- a/test/extended/prevcontroller.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# -# Runs compatibility tests with a previous controller version -RUN_PREVIOUS_CONTROLLER=1 SKIP_TESTS="\[SkipPrevControllers\]" \ - "$(dirname "${BASH_SOURCE}")/compatibility.sh" diff --git a/test/extended/setup.sh b/test/extended/setup.sh deleted file mode 100644 index 2da1c9e48cce..000000000000 --- a/test/extended/setup.sh +++ /dev/null @@ -1,172 +0,0 @@ -#!/bin/bash -# -# This abstracts starting up an extended server. - -# If invoked with arguments, executes the test directly. -function os::test::extended::focus () { - if [[ "$@[@]" =~ "ginkgo.focus" ]]; then - os::log::fatal "the --ginkgo.focus flag is no longer supported, use FOCUS=foo instead." - exit 1 - fi - if [[ "$@[@]" =~ "-suite" ]]; then - os::log::fatal "the -suite flag is no longer supported, use SUITE=foo instead." - exit 1 - fi - if [[ -n "${FOCUS:-}" ]]; then - exitstatus=0 - - local skip="\[Serial\]" - if [[ -n "${SKIP:-}" ]]; then - skip+="|${SKIP}" - fi - # first run anything that isn't explicitly declared [Serial], and matches the $FOCUS, in a parallel mode. - os::log::info "Running parallel tests N=${PARALLEL_NODES:-} with focus ${FOCUS}" - TEST_REPORT_FILE_NAME=focus_parallel TEST_PARALLEL="${PARALLEL_NODES:-5}" os::test::extended::run -- -ginkgo.skip "${skip}" -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - - # Then run everything that requires serial and matches the $FOCUS, serially. - # there is bit of overlap here because not all serial tests declare [Serial], so they might have run in the - # parallel section above. Hopefully your focus was precise enough to exclude them, and we should be adding - # the [Serial] tag to them as needed. - os::log::info "" - os::log::info "Running serial tests with focus ${FOCUS}" - t=$FOCUS - FOCUS="\[Serial\].*?${t}" - TEST_REPORT_FILE_NAME=focus_serial os::test::extended::run -- -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - FOCUS="${t}.*?\[Serial\]" - TEST_REPORT_FILE_NAME=focus_serial2 os::test::extended::run -- -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - - exit $exitstatus - fi -} - -# Launches an extended server for OpenShift -# TODO: this should be doing less, because clusters should be stood up outside -# and then tests are executed. Tests that depend on fine grained setup should -# be done in other contexts. -function os::test::extended::setup () { - echo "" - os::log::warning "REMINDER, EXTENDED TESTS NO LONGER START A CLUSTER." - os::log::warning "THE CLUSTER REFERENCED BY THE 'KUBECONFIG' ENV VAR IS USED." 
- echo "" - - # build binaries - os::util::ensure::built_binary_exists 'ginkgo' 'vendor/github.com/onsi/ginkgo/ginkgo' - os::util::ensure::built_binary_exists 'extended.test' 'test/extended/extended.test' - os::util::ensure::built_binary_exists 'oc' - - # ensure proper relative directories are set - export KUBE_REPO_ROOT="${OS_ROOT}/vendor/k8s.io/kubernetes" - - os::util::environment::setup_time_vars - - # Allow setting $JUNIT_REPORT to toggle output behavior - if [[ -n "${JUNIT_REPORT:-}" ]]; then - # the Ginkgo tests also generate jUnit but expect different envars - export TEST_REPORT_DIR="${ARTIFACT_DIR}/junit" - mkdir -p $TEST_REPORT_DIR - fi - - function cleanup() { - return_code=$? - os::test::junit::generate_report - os::util::describe_return_code "${return_code}" - exit "${return_code}" - } - trap "cleanup" EXIT - - os::log::info "Running tests against existing cluster..." - return 0 -} - -# Run extended tests or print out a list of tests that need to be run -# Input: -# - FOCUS - the extended test focus -# - SKIP - the tests to skip -# - TEST_EXTENDED_SKIP - a global filter that allows additional tests to be omitted, will -# be joined with SKIP -# - SHOW_ALL - if set, then only print out tests to be run -# - TEST_PARALLEL - if set, run the tests in parallel with the specified number of nodes -# - Arguments - arguments to pass to ginkgo -function os::test::extended::run () { - local listArgs=() - local runArgs=() - - if [[ -n "${FOCUS-}" ]]; then - listArgs+=("--ginkgo.focus=${FOCUS}") - runArgs+=("-focus=${FOCUS}") - elif [[ -n "${SUITE-}" ]]; then - listArgs+=("--ginkgo.focus=${SUITE}") - runArgs+=("-focus=${SUITE}") - fi - - local skip="${SKIP-}" - # Allow additional skips to be provided on the command line - if [[ -n "${TEST_EXTENDED_SKIP-}" ]]; then - if [[ -n "${skip}" ]]; then - skip="${skip}|${TEST_EXTENDED_SKIP}" - else - skip="${TEST_EXTENDED_SKIP}" - fi - fi - if [[ -n "${skip}" ]]; then - listArgs+=("--ginkgo.skip=${skip}") - runArgs+=("-skip=${skip}") - fi - - if [[ -n "${TEST_PARALLEL-}" ]]; then - runArgs+=("-p" "-nodes=${TEST_PARALLEL}") - fi - - if [[ -n "${SHOW_ALL-}" ]]; then - PRINT_TESTS=1 - os::test::extended::test_list "${listArgs[@]:+"${listArgs[@]}"}" - return - fi - - os::test::extended::test_list "${listArgs[@]:+"${listArgs[@]}"}" - - if [[ "${TEST_COUNT}" -eq 0 ]]; then - os::log::warning "No tests were selected" - return - fi - - ginkgo -v -noColor "${runArgs[@]:+"${runArgs[@]}"}" "$( os::util::find::built_binary extended.test )" "$@" -} - -# Create a list of extended tests to be run with the given arguments -# Input: -# - Arguments to pass to ginkgo -# - SKIP_ONLY - If set, only selects tests to be skipped -# - PRINT_TESTS - If set, print the list of tests -# Output: -# - TEST_COUNT - the number of tests selected by the arguments -function os::test::extended::test_list () { - local full_test_list=() - local selected_tests=() - - while IFS= read -r; do - full_test_list+=( "${REPLY}" ) - done < <(TEST_OUTPUT_QUIET=true extended.test "$@" --ginkgo.dryRun --ginkgo.noColor ) - if [[ "${REPLY}" ]]; then lines+=( "$REPLY" ); fi - - for test in "${full_test_list[@]}"; do - if [[ -n "${SKIP_ONLY:-}" ]]; then - if grep -q "35mskip" <<< "${test}"; then - selected_tests+=( "${test}" ) - fi - else - if grep -q "1mok" <<< "${test}"; then - selected_tests+=( "${test}" ) - fi - fi - done - if [[ -n "${PRINT_TESTS:-}" ]]; then - if [[ ${#selected_tests[@]} -eq 0 ]]; then - os::log::warning "No tests were selected" - else - printf '%s\n' "${selected_tests[@]}" | sort - 
fi - fi - export TEST_COUNT=${#selected_tests[@]} -} -readonly -f os::test::extended::test_list diff --git a/test/extended/smoke.sh b/test/extended/smoke.sh deleted file mode 100755 index 6a1bb6fa5957..000000000000 --- a/test/extended/smoke.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# -# Runs the conformance extended tests for OpenShift -source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh" -source "${OS_ROOT}/test/extended/setup.sh" - -os::test::extended::setup -os::test::extended::focus "$@" - -exitstatus=0 - -# run parallel tests -os::log::info "Running parallel tests N=${PARALLEL_NODES:-}" -TEST_PARALLEL="${PARALLEL_NODES:-5}" TEST_REPORT_FILE_NAME=conformance_parallel os::test::extended::run -- -ginkgo.focus "\[Smoke\]" -test.timeout 6h ${TEST_EXTENDED_ARGS-} || exitstatus=$? - -exit $exitstatus diff --git a/test/extended/util/test.go b/test/extended/util/test.go index 7d298d9cee17..8411183524b9 100644 --- a/test/extended/util/test.go +++ b/test/extended/util/test.go @@ -7,7 +7,6 @@ import ( "path" "regexp" "strings" - "testing" "github.com/golang/glog" "github.com/onsi/ginkgo" @@ -46,13 +45,22 @@ var TestContext *e2e.TestContextType = &e2e.TestContext // KUBECONFIG - Path to kubeconfig containing embedded authinfo // TEST_REPORT_DIR - If set, JUnit output will be written to this directory for each test // TEST_REPORT_FILE_NAME - If set, will determine the name of the file that JUnit output is written to +func Init() { + flag.StringVar(&syntheticSuite, "suite", "", "DEPRECATED: Optional suite selector to filter which tests are run. Use focus.") + e2e.ViperizeFlags() + InitTest() +} + +func InitStandardFlags() { + e2e.RegisterCommonFlags() + e2e.RegisterClusterFlags() + e2e.RegisterStorageFlags() +} + func InitTest() { // interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip` ginkgo.BeforeEach(checkSyntheticInput) - flag.StringVar(&syntheticSuite, "suite", "", "DEPRECATED: Optional suite selector to filter which tests are run. 
Use focus.") - e2e.ViperizeFlags() - TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" TestContext.VerifyServiceAccount = true TestContext.RepoRoot = os.Getenv("KUBE_REPO_ROOT") @@ -82,10 +90,10 @@ func InitTest() { // Ensure that Kube tests run privileged (like they do upstream) TestContext.CreateTestingNS = createTestingNS - glog.Infof("Extended test version %s", version.Get().String()) + glog.V(2).Infof("Extended test version %s", version.Get().String()) } -func ExecuteTest(t *testing.T, suite string) { +func ExecuteTest(t ginkgo.GinkgoTestingT, suite string) { var r []ginkgo.Reporter if dir := os.Getenv("TEST_REPORT_DIR"); len(dir) > 0 { @@ -109,6 +117,17 @@ func ExecuteTest(t *testing.T, suite string) { r = append(r, reporters.NewJUnitReporter(path.Join(TestContext.ReportDir, fmt.Sprintf("%s_%02d.xml", reportFileName, config.GinkgoConfig.ParallelNode)))) } + AnnotateTestSuite() + + if quiet { + r = append(r, NewSimpleReporter()) + ginkgo.RunSpecsWithCustomReporters(t, suite, r) + } else { + ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r) + } +} + +func AnnotateTestSuite() { matches := make(map[string]*regexp.Regexp) for label, items := range testMaps { matches[label] = regexp.MustCompile(strings.Join(items, `|`)) @@ -154,13 +173,6 @@ func ExecuteTest(t *testing.T, suite string) { } node.SetText(node.Text() + labels) }) - - if quiet { - r = append(r, NewSimpleReporter()) - ginkgo.RunSpecsWithCustomReporters(t, suite, r) - } else { - ginkgo.RunSpecsWithDefaultAndCustomReporters(t, suite, r) - } } // TODO: Use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/pull/228 to implement this. @@ -370,6 +382,12 @@ var ( `Should be able to support the 1.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195 }, + // tests that will pass in 4.0 + // TODO: this will be removed once 4.0 passes all conformance tests + "[Suite:openshift/smoke-4]": { + `Secrets should be consumable from pods in volume with defaultMode set`, + `Managed cluster should start all core operators`, + }, } excludedTests = []string{ diff --git a/vendor/github.com/onsi/ginkgo/extension.go b/vendor/github.com/onsi/ginkgo/extension.go new file mode 100644 index 000000000000..f9ddc2597b82 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/extension.go @@ -0,0 +1,14 @@ +package ginkgo + +import ( + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/writer" +) + +func GlobalSuite() *suite.Suite { + return globalSuite +} + +func GinkgoWriterType() *writer.Writer { + return GinkgoWriter.(*writer.Writer) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite_extension.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite_extension.go new file mode 100644 index 000000000000..9adbd3352d13 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite_extension.go @@ -0,0 +1,27 @@ +package suite + +import ( + "math/rand" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/spec_iterator" +) + +func (suite *Suite) Iterator(config config.GinkgoConfigType) spec_iterator.SpecIterator { + specsSlice := []*spec.Spec{} + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + + if config.RandomizeAllSpecs { + 
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + return spec_iterator.NewSerialIterator(specs.Specs()) +} diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 8366bc9be5b8..2e40fbfa4fb3 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -1,6 +1,7 @@ package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -29,10 +30,31 @@ func Resource(resource string) schema.GroupResource { // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(GroupVersion, - &Image{}, - &ImageList{}, + &Authentication{}, + &AuthenticationList{}, &Build{}, &BuildList{}, + &Console{}, + &ConsoleList{}, + &DNS{}, + &DNSList{}, + &IdentityProvider{}, + &IdentityProviderList{}, + &Image{}, + &ImageList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Ingress{}, + &IngressList{}, + &Network{}, + &NetworkList{}, + &OAuth{}, + &OAuthList{}, + &Project{}, + &ProjectList{}, + &Scheduling{}, + &SchedulingList{}, ) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 8cb274c5ba2d..727db57b364c 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -1,78 +1,10 @@ package v1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Image holds cluster-wide information about how to handle images. The canonical name is `cluster` -type Image struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty"` - - // spec holds user settable values for configuration - Spec ImageSpec `json:"spec"` - // status holds observed values from the cluster. They may not be overridden. - Status ImageStatus `json:"status"` -} - -type ImageSpec struct { - // AllowedRegistriesForImport limits the container image registries that normal users may import - // images from. Set this list to the registries that you trust to contain valid Docker - // images and that you want applications to be able to import from. Users with - // permission to create Images or ImageStreamMappings via the API are not affected by - // this policy - typically only administrators or system integrations will have those - // permissions. - AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` - - // ExternalRegistryHostname sets the hostname for the default external image - // registry. The external hostname should be set only when the image registry - // is exposed externally. The value is used in 'publicDockerImageRepository' - // field in ImageStreams. The value must be in "hostname[:port]" format. - ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` - - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that - // should be trusted during imagestream import. 
- AdditionalTrustedCA ConfigMapReference `json:"additionalTrustedCA,omitempty"` -} - -type ImageStatus struct { - - // this value is set by the image registry operator which controls the internal registry hostname - // InternalRegistryHostname sets the hostname for the default internal image - // registry. The value must be in "hostname[:port]" format. - // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY - // environment variable but this setting overrides the environment variable. - InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ImageList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ListMeta `json:"metadata,omitempty"` - Items []Image `json:"items"` -} - -// RegistryLocation contains a location of the registry specified by the registry domain -// name. The domain name might include wildcards, like '*' or '??'. -type RegistryLocation struct { - // DomainName specifies a domain name for the registry - // In case the registry use non-standard (80 or 443) port, the port should be included - // in the domain name as well. - DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) - // By default (if not specified) the registry is assumed as secure. - Insecure bool `json:"insecure,omitempty"` -} - // ConfigMapReference references the location of a configmap. type ConfigMapReference struct { Namespace string `json:"namespace"` @@ -81,96 +13,6 @@ type ConfigMapReference struct { Key string `json:"filename,omitempty"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster` -type Build struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the build controller configuration - // +optional - Spec BuildSpec `json:"spec,omitempty"` -} - -type BuildSpec struct { - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that - // should be trusted for image pushes and pulls during builds. - // +optional - AdditionalTrustedCA ConfigMapReference `json:"additionalTrustedCA,omitempty"` - // BuildDefaults controls the default information for Builds - // +optional - BuildDefaults BuildDefaults `json:"buildDefaults,omitempty"` - // BuildOverrides controls override settings for builds - // +optional - BuildOverrides BuildOverrides `json:"buildOverrides,omitempty"` -} - -type BuildDefaults struct { - // GitHTTPProxy is the location of the HTTPProxy for Git source - // +optional - GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` - - // GitHTTPSProxy is the location of the HTTPSProxy for Git source - // +optional - GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` - - // GitNoProxy is the list of domains for which the proxy should not be used - // +optional - GitNoProxy string `json:"gitNoProxy,omitempty"` - - // Env is a set of default environment variables that will be applied to the - // build if the specified variables do not exist on the build - // +optional - Env []corev1.EnvVar `json:"env,omitempty"` - - // ImageLabels is a list of docker labels that are applied to the resulting image. - // User can override a default label by providing a label with the same name in their - // Build/BuildConfig. 
- // +optional - ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - - // Resources defines resource requirements to execute the build. - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` -} - -type ImageLabel struct { - // Name defines the name of the label. It must have non-zero length. - Name string `json:"name"` - - // Value defines the literal value of the label. - // +optional - Value string `json:"value,omitempty"` -} - -type BuildOverrides struct { - // ImageLabels is a list of docker labels that are applied to the resulting image. - // If user provided a label in their Build/BuildConfig with the same name as one in this - // list, the user's label will be overwritten. - // +optional - ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - - // NodeSelector is a selector which must be true for the build pod to fit on a node - // +optional - NodeSelector metav1.LabelSelector `json:"nodeSelector,omitempty"` - - // Tolerations is a list of Tolerations that will override any existing - // tolerations set on a build pod. - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type BuildList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ListMeta `json:"metadata,omitempty"` - Items []Build `json:"items"` -} - // HTTPServingInfo holds configuration for serving HTTP type HTTPServingInfo struct { // ServingInfo is the HTTP serving information diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 000000000000..281dca7acd31 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,39 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication holds cluster-wide information about Authentication. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Authentication struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // webhook token auth config (ttl) + // external token address + // serviceAccountOAuthGrantMethod or remove/disallow it as an option +} + +type AuthenticationStatus struct { + // internal token address +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ metav1.ListMeta `json:"metadata,omitempty"` + Items []Authentication `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go new file mode 100644 index 000000000000..480c1d321cee --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -0,0 +1,96 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build holds cluster-wide information on how to handle builds. The canonical name is `cluster` +type Build struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // Spec holds user-settable values for the build controller configuration + // +optional + Spec BuildSpec `json:"spec,omitempty"` +} + +type BuildSpec struct { + // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted for image pushes and pulls during builds. + // +optional + AdditionalTrustedCA ConfigMapReference `json:"additionalTrustedCA,omitempty"` + // BuildDefaults controls the default information for Builds + // +optional + BuildDefaults BuildDefaults `json:"buildDefaults,omitempty"` + // BuildOverrides controls override settings for builds + // +optional + BuildOverrides BuildOverrides `json:"buildOverrides,omitempty"` +} + +type BuildDefaults struct { + // GitHTTPProxy is the location of the HTTPProxy for Git source + // +optional + GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` + + // GitHTTPSProxy is the location of the HTTPSProxy for Git source + // +optional + GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` + + // GitNoProxy is the list of domains for which the proxy should not be used + // +optional + GitNoProxy string `json:"gitNoProxy,omitempty"` + + // Env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + + // ImageLabels is a list of docker labels that are applied to the resulting image. + // User can override a default label by providing a label with the same name in their + // Build/BuildConfig. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // Resources defines resource requirements to execute the build. + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} + +type ImageLabel struct { + // Name defines the name of the label. It must have non-zero length. + Name string `json:"name"` + + // Value defines the literal value of the label. + // +optional + Value string `json:"value,omitempty"` +} + +type BuildOverrides struct { + // ImageLabels is a list of docker labels that are applied to the resulting image. + // If user provided a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // NodeSelector is a selector which must be true for the build pod to fit on a node + // +optional + NodeSelector metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // Tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. 
+ // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type BuildList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Build `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go new file mode 100644 index 000000000000..36beb4113c73 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -0,0 +1,37 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console holds cluster-wide information about Console. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Console struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec ConsoleSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status ConsoleStatus `json:"status"` +} + +type ConsoleSpec struct { + // special console public url? +} + +type ConsoleStatus struct { + // console public url +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Console `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 000000000000..44fa6e4d27de --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,36 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type DNS struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { +} + +type DNSStatus struct { + // dnsSuffix (service-ca amongst others) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type DNSList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ metav1.ListMeta `json:"metadata,omitempty"` + Items []DNS `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_idp.go b/vendor/github.com/openshift/api/config/v1/types_idp.go new file mode 100644 index 000000000000..c2425a068f29 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_idp.go @@ -0,0 +1,36 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IdentityProvider holds cluster-wide information about IdentityProvider. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type IdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec IdentityProviderSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status IdentityProviderStatus `json:"status"` +} + +type IdentityProviderSpec struct { + // all the IDP settings +} + +type IdentityProviderStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IdentityProviderList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []IdentityProvider `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 000000000000..8fbf126a6f89 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,70 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image holds cluster-wide information about how to handle images. The canonical name is `cluster` +type Image struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status ImageStatus `json:"status"` +} + +type ImageSpec struct { + // AllowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // ExternalRegistryHostname sets the hostname for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` + + // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import. 
+ AdditionalTrustedCA ConfigMapReference `json:"additionalTrustedCA,omitempty"` +} + +type ImageStatus struct { + + // this value is set by the image registry operator which controls the internal registry hostname + // InternalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. + InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ImageList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Image `json:"items"` +} + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. +type RegistryLocation struct { + // DomainName specifies a domain name for the registry + // In case the registry use non-standard (80 or 443) port, the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // Insecure indicates whether the registry is secure (https) or insecure (http) + // By default (if not specified) the registry is assumed as secure. + Insecure bool `json:"insecure,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go new file mode 100644 index 000000000000..234e872c0b8c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -0,0 +1,38 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Infrastructure struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec InfrastructureSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status InfrastructureStatus `json:"status"` +} + +type InfrastructureSpec struct { + // secret reference? + // configmap reference to file? +} + +type InfrastructureStatus struct { + // type +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type InfrastructureList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Infrastructure `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go new file mode 100644 index 000000000000..e8467a090683 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -0,0 +1,36 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress holds cluster-wide information about Ingress. 
The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec IngressSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status IngressStatus `json:"status"` +} + +type IngressSpec struct { + // default suffix. It goes here or it gets removed from server +} + +type IngressStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Ingress `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go new file mode 100644 index 000000000000..aaea1aab1450 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -0,0 +1,39 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network holds cluster-wide information about Network. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Network struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec NetworkSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status NetworkStatus `json:"status"` +} + +type NetworkSpec struct { + // serviceCIDR + // servicePortRange + // vxlanPort + // ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` +} + +type NetworkStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Network `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go new file mode 100644 index 000000000000..d4402ed33831 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -0,0 +1,37 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type OAuth struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec OAuthSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status OAuthStatus `json:"status"` +} + +type OAuthSpec struct { + // options for configuring the embedded oauth server. + // possibly wellknown? 
+} + +type OAuthStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type OAuthList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []OAuth `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go new file mode 100644 index 000000000000..4280614fbffa --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -0,0 +1,37 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds cluster-wide information about Project. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Project struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec ProjectSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status ProjectStatus `json:"status"` +} + +type ProjectSpec struct { + // project request message + // project request template +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata,omitempty"` + Items []Project `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go new file mode 100644 index 000000000000..603ed9054489 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -0,0 +1,36 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scheduling holds cluster-wide information about Scheduling. The canonical name is `cluster` +// TODO this object is an example of a possible grouping and is subject to change or removal +type Scheduling struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + Spec SchedulingSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + Status SchedulingStatus `json:"status"` +} + +type SchedulingSpec struct { + // default node selector (I would be happy to see this die....) +} + +type SchedulingStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SchedulingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ metav1.ListMeta `json:"metadata,omitempty"` + Items []Scheduling `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/types_swagger_doc_generated.go index 5a1c8de1331c..7dfb4cd69f85 100644 --- a/vendor/github.com/openshift/api/config/v1/types_swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/types_swagger_doc_generated.go @@ -39,56 +39,6 @@ func (AuditConfig) SwaggerDoc() map[string]string { return map_AuditConfig } -var map_Build = map[string]string{ - "": "Build holds cluster-wide information on how to handle builds. The canonical name is `cluster`", - "spec": "Spec holds user-settable values for the build controller configuration", -} - -func (Build) SwaggerDoc() map[string]string { - return map_Build -} - -var map_BuildDefaults = map[string]string{ - "gitHTTPProxy": "GitHTTPProxy is the location of the HTTPProxy for Git source", - "gitHTTPSProxy": "GitHTTPSProxy is the location of the HTTPSProxy for Git source", - "gitNoProxy": "GitNoProxy is the list of domains for which the proxy should not be used", - "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", - "resources": "Resources defines resource requirements to execute the build.", -} - -func (BuildDefaults) SwaggerDoc() map[string]string { - return map_BuildDefaults -} - -var map_BuildList = map[string]string{ - "metadata": "Standard object's metadata.", -} - -func (BuildList) SwaggerDoc() map[string]string { - return map_BuildList -} - -var map_BuildOverrides = map[string]string{ - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", - "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", - "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", -} - -func (BuildOverrides) SwaggerDoc() map[string]string { - return map_BuildOverrides -} - -var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds.", - "buildDefaults": "BuildDefaults controls the default information for Builds", - "buildOverrides": "BuildOverrides controls override settings for builds", -} - -func (BuildSpec) SwaggerDoc() map[string]string { - return map_BuildSpec -} - var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", "certFile": "CertFile is a file containing a PEM-encoded certificate", @@ -159,52 +109,6 @@ func (HTTPServingInfo) SwaggerDoc() map[string]string { return map_HTTPServingInfo } -var map_Image = map[string]string{ - "": "Image holds cluster-wide information about how to handle images. The canonical name is `cluster`", - "metadata": "Standard object's metadata.", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. 
They may not be overridden.", -} - -func (Image) SwaggerDoc() map[string]string { - return map_Image -} - -var map_ImageLabel = map[string]string{ - "name": "Name defines the name of the label. It must have non-zero length.", - "value": "Value defines the literal value of the label.", -} - -func (ImageLabel) SwaggerDoc() map[string]string { - return map_ImageLabel -} - -var map_ImageList = map[string]string{ - "metadata": "Standard object's metadata.", -} - -func (ImageList) SwaggerDoc() map[string]string { - return map_ImageList -} - -var map_ImageSpec = map[string]string{ - "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", - "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import.", -} - -func (ImageSpec) SwaggerDoc() map[string]string { - return map_ImageSpec -} - -var map_ImageStatus = map[string]string{ - "internalRegistryHostname": "this value is set by the image registry operator which controls the internal registry hostname InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", -} - -func (ImageStatus) SwaggerDoc() map[string]string { - return map_ImageStatus -} - var map_KubeClientConfig = map[string]string{ "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", @@ -237,16 +141,6 @@ func (NamedCertificate) SwaggerDoc() map[string]string { return map_NamedCertificate } -var map_RegistryLocation = map[string]string{ - "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", -} - -func (RegistryLocation) SwaggerDoc() map[string]string { - return map_RegistryLocation -} - var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", "url": "URL is the remote URL to connect to", diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 0388113b6ab1..e4af30f2ffdc 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -43,6 +43,99 @@ func (in *AuditConfig) DeepCopy() *AuditConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Build) DeepCopyInto(out *Build) { *out = *in @@ -229,117 +322,120 @@ func (in *ConfigMapReference) DeepCopy() *ConfigMapReference { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { +func (in *Console) DeepCopyInto(out *Console) { *out = *in - if in.URLs != nil { - in, out := &in.URLs, &out.URLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.CertInfo = in.CertInfo + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. -func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { if in == nil { return nil } - out := new(EtcdConnectionInfo) + out := new(Console) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { *out = *in - in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. -func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { if in == nil { return nil } - out := new(EtcdStorageConfig) + out := new(ConsoleList) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { *out = *in - in.ServingInfo.DeepCopyInto(&out.ServingInfo) - if in.CORSAllowedOrigins != nil { - in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.AuditConfig.DeepCopyInto(&out.AuditConfig) - in.StorageConfig.DeepCopyInto(&out.StorageConfig) - if in.AdmissionPluginConfig != nil { - in, out := &in.AdmissionPluginConfig, &out.AdmissionPluginConfig - *out = make(map[string]AdmissionPluginConfig, len(*in)) - for key, val := range *in { - newVal := new(AdmissionPluginConfig) - val.DeepCopyInto(newVal) - (*out)[key] = *newVal - } - } - out.KubeClientConfig = in.KubeClientConfig return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. -func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { if in == nil { return nil } - out := new(GenericAPIServerConfig) + out := new(ConsoleSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { *out = *in - in.ServingInfo.DeepCopyInto(&out.ServingInfo) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. -func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { if in == nil { return nil } - out := new(HTTPServingInfo) + out := new(ConsoleStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { +func (in *DNS) DeepCopyInto(out *DNS) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec out.Status = in.Status return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { if in == nil { return nil } - out := new(Image) + out := new(DNS) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Image) DeepCopyObject() runtime.Object { +func (in *DNS) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -347,29 +443,13 @@ func (in *Image) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. 
-func (in *ImageLabel) DeepCopy() *ImageLabel { - if in == nil { - return nil - } - out := new(ImageLabel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageList) DeepCopyInto(out *ImageList) { +func (in *DNSList) DeepCopyInto(out *DNSList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Image, len(*in)) + *out = make([]DNS, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -377,18 +457,18 @@ func (in *ImageList) DeepCopyInto(out *ImageList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. -func (in *ImageList) DeepCopy() *ImageList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { if in == nil { return nil } - out := new(ImageList) + out := new(DNSList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageList) DeepCopyObject() runtime.Object { +func (in *DNSList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -396,130 +476,980 @@ func (in *ImageList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { *out = *in - if in.AllowedRegistriesForImport != nil { - in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport - *out = make([]RegistryLocation, len(*in)) - copy(*out, *in) - } - out.AdditionalTrustedCA = in.AdditionalTrustedCA return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. -func (in *ImageSpec) DeepCopy() *ImageSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { if in == nil { return nil } - out := new(ImageSpec) + out := new(DNSSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. -func (in *ImageStatus) DeepCopy() *ImageStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { if in == nil { return nil } - out := new(ImageStatus) + out := new(DNSStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { *out = *in - out.ConnectionOverrides = in.ConnectionOverrides + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. 
-func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { if in == nil { return nil } - out := new(KubeClientConfig) + out := new(EtcdConnectionInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { *out = *in - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod + in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. -func (in *LeaderElection) DeepCopy() *LeaderElection { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { if in == nil { return nil } - out := new(LeaderElection) + out := new(EtcdStorageConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { +func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins *out = make([]string, len(*in)) copy(*out, *in) } - out.CertInfo = in.CertInfo + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + in.StorageConfig.DeepCopyInto(&out.StorageConfig) + if in.AdmissionPluginConfig != nil { + in, out := &in.AdmissionPluginConfig, &out.AdmissionPluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + newVal := new(AdmissionPluginConfig) + val.DeepCopyInto(newVal) + (*out)[key] = *newVal + } + } + out.KubeClientConfig = in.KubeClientConfig return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. -func (in *NamedCertificate) DeepCopy() *NamedCertificate { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. +func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { if in == nil { return nil } - out := new(NamedCertificate) + out := new(GenericAPIServerConfig) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. -func (in *RegistryLocation) DeepCopy() *RegistryLocation { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. 
+func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { if in == nil { return nil } - out := new(RegistryLocation) + out := new(HTTPServingInfo) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { *out = *in - out.CertInfo = in.CertInfo + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. -func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { if in == nil { return nil } - out := new(RemoteConnectionInfo) + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderList) DeepCopyInto(out *IdentityProviderList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderList. +func (in *IdentityProviderList) DeepCopy() *IdentityProviderList { + if in == nil { + return nil + } + out := new(IdentityProviderList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityProviderList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderSpec) DeepCopyInto(out *IdentityProviderSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderSpec. +func (in *IdentityProviderSpec) DeepCopy() *IdentityProviderSpec { + if in == nil { + return nil + } + out := new(IdentityProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderStatus) DeepCopyInto(out *IdentityProviderStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderStatus. +func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus { + if in == nil { + return nil + } + out := new(IdentityProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. +func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. +func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. +func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. 
+func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. +func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduling) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingList) DeepCopyInto(out *SchedulingList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduling, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingList. 
+func (in *SchedulingList) DeepCopy() *SchedulingList { + if in == nil { + return nil + } + out := new(SchedulingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingSpec) DeepCopyInto(out *SchedulingSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingSpec. +func (in *SchedulingSpec) DeepCopy() *SchedulingSpec { + if in == nil { + return nil + } + out := new(SchedulingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulingStatus) DeepCopyInto(out *SchedulingStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulingStatus. +func (in *SchedulingStatus) DeepCopy() *SchedulingStatus { + if in == nil { + return nil + } + out := new(SchedulingStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index b77639b988b0..13b8e3375124 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: 63644647f862ec7482e1c18412fc08babd74694a38b4ca257e42a0656677f513 -updated: 2018-10-30T18:13:47.364073464-04:00 +updated: 2018-11-07T13:15:35.008895803-05:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -49,7 +49,7 @@ imports: - name: github.com/modern-go/reflect2 version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd - name: github.com/openshift/api - version: 2699ad42427b7e7b2cad1daefc93b632c9c0bb6c + version: fd594d07117b2d47134db828fb9ea85efa2a0c2f subpackages: - apps/v1 - authorization/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go index 75f71bc0097c..79c5cc500071 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go +++ b/vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/generic_soak.go @@ -50,13 +50,11 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti scale := framework.TestContext.LoggingSoak.Scale if framework.TestContext.LoggingSoak.Scale == 0 { scale = 1 - framework.Logf("Overriding default scale value of zero to %d", scale) } milliSecondsBetweenWaves := framework.TestContext.LoggingSoak.MilliSecondsBetweenWaves if milliSecondsBetweenWaves == 0 { milliSecondsBetweenWaves = 5000 - framework.Logf("Overriding default milliseconds value of zero to %d", milliSecondsBetweenWaves) } return scale, time.Duration(milliSecondsBetweenWaves) * time.Millisecond
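
The bulk of this section is vendored deepcopy-gen output, which adds DeepCopyInto/DeepCopy/DeepCopyObject methods for the new config/v1 types (Authentication, Console, DNS, IdentityProvider, Infrastructure, Ingress, Network, OAuth, Project, Scheduling). As a minimal illustrative sketch only, not part of this diff: callers typically use the generated DeepCopy to take an independent copy of an API object before mutating it. The object and field values below are made up for the example; only the package path and the DNS type come from the vendored code above.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// DNS is one of the types whose deepcopy functions are added in this diff.
	original := &configv1.DNS{}
	original.Name = "cluster" // promoted from the embedded ObjectMeta

	// DeepCopy returns a fully independent copy, so mutating the copy
	// leaves the original untouched, which is why controller code copies
	// shared-informer objects before editing them.
	copied := original.DeepCopy()
	copied.Name = "scratch"

	fmt.Println(original.Name, copied.Name) // prints: cluster scratch
}

This snippet only shows the pattern the generated methods support; nothing in this change adds such a caller.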