diff --git a/Makefile b/Makefile index 08765f9121..24d3ca7dfc 100644 --- a/Makefile +++ b/Makefile @@ -160,7 +160,7 @@ build/helm-operator-%-linux-gnu: GOARGS = GOOS=linux build/%: $(SOURCES) ## Build the operator-sdk binary $(Q){ \ - cmdpkg=$$(echo $* | sed "s/\(operator-sdk\|ansible-operator\|helm-operator\).*/\1/"); \ + cmdpkg=$$(echo $* | sed -E "s/(operator-sdk|ansible-operator|helm-operator).*/\1/"); \ $(GOARGS) go build $(GO_BUILD_ARGS) -o $@ ./cmd/$$cmdpkg; \ } diff --git a/changelog/fragments/scorecard-label-perms.yaml b/changelog/fragments/scorecard-label-perms.yaml new file mode 100644 index 0000000000..6a83700dfd --- /dev/null +++ b/changelog/fragments/scorecard-label-perms.yaml @@ -0,0 +1,5 @@ +entries: + - description: > + Fixed a bug in scorecard that caused tests to fail with permission errors + when loading the bundle. + kind: "bugfix" diff --git a/changelog/fragments/scorecard-output-api.yaml b/changelog/fragments/scorecard-output-api.yaml new file mode 100644 index 0000000000..482c8ba08d --- /dev/null +++ b/changelog/fragments/scorecard-output-api.yaml @@ -0,0 +1,15 @@ +# entries is a list of entries to include in +# release notes and/or the migration guide +entries: + - description: > + Changed scorecard text and json output to use a `v1alpha3.TestList` + instead of aggregating all test results under a single + `v1alpha3.Test` and set exit status to 1 when a test fails. + kind: "change" + breaking: true + migration: + header: Alpha scorecard output API updates + body: | + Update any scripts interpretting the scorecard output to + understand the v1alpha3 TestList format. 
+ diff --git a/cmd/ansible-operator/main.go b/cmd/ansible-operator/main.go index 101d180974..9c0678ba7c 100644 --- a/cmd/ansible-operator/main.go +++ b/cmd/ansible-operator/main.go @@ -15,21 +15,284 @@ package main import ( - log "github.com/sirupsen/logrus" + "context" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "strings" + "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" - "github.com/operator-framework/operator-sdk/pkg/ansible" - aoflags "github.com/operator-framework/operator-sdk/pkg/ansible/flags" + "github.com/operator-framework/operator-sdk/pkg/ansible/controller" + "github.com/operator-framework/operator-sdk/pkg/ansible/flags" + "github.com/operator-framework/operator-sdk/pkg/ansible/proxy" + "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap" + "github.com/operator-framework/operator-sdk/pkg/ansible/runner" + "github.com/operator-framework/operator-sdk/pkg/ansible/watches" + "github.com/operator-framework/operator-sdk/pkg/k8sutil" + kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" + "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" + "github.com/operator-framework/operator-sdk/pkg/metrics" + sdkVersion "github.com/operator-framework/operator-sdk/version" +) + +var ( + metricsHost = "0.0.0.0" + log = logf.Log.WithName("cmd") + metricsPort int32 = 8383 + operatorMetricsPort int32 = 8686 + healthProbePort int32 = 6789 ) +func printVersion() { + 
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) + log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) + log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) +} + func main() { - flags := aoflags.AddTo(pflag.CommandLine) + f := &flags.Flags{} + f.AddTo(pflag.CommandLine) pflag.Parse() logf.SetLogger(zap.Logger()) - if err := ansible.Run(flags); err != nil { - log.Fatal(err) + printVersion() + + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "Failed to get config.") + os.Exit(1) + } + + // Set default manager options + // TODO: probably should expose the host & port as an environment variables + options := manager.Options{ + HealthProbeBindAddress: fmt.Sprintf("%s:%d", metricsHost, healthProbePort), + MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + NewClient: func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { + c, err := client.New(config, options) + if err != nil { + return nil, err + } + return &client.DelegatingClient{ + Reader: cache, + Writer: c, + StatusClient: c, + }, nil + }, + } + + namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) + log = log.WithValues("Namespace", namespace) + if found { + if namespace == metav1.NamespaceAll { + log.Info("Watching all namespaces.") + options.Namespace = metav1.NamespaceAll + } else { + if strings.Contains(namespace, ",") { + log.Info("Watching multiple namespaces.") + options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) + } else { + log.Info("Watching single namespace.") + options.Namespace = namespace + } + } + } else { + log.Info(fmt.Sprintf("%v environment variable not set. 
Watching all namespaces.", + k8sutil.WatchNamespaceEnvVar)) + options.Namespace = metav1.NamespaceAll + } + + // Create a new manager to provide shared dependencies and start components + mgr, err := manager.New(cfg, options) + if err != nil { + log.Error(err, "Failed to create a new manager.") + os.Exit(1) + } + + var gvks []schema.GroupVersionKind + cMap := controllermap.NewControllerMap() + watches, err := watches.Load(f.WatchesFile, f.MaxWorkers, f.AnsibleVerbosity) + if err != nil { + log.Error(err, "Failed to load watches.") + os.Exit(1) + } + for _, w := range watches { + runner, err := runner.New(w) + if err != nil { + log.Error(err, "Failed to create runner") + os.Exit(1) + } + + ctr := controller.Add(mgr, controller.Options{ + GVK: w.GroupVersionKind, + Runner: runner, + ManageStatus: w.ManageStatus, + AnsibleDebugLogs: getAnsibleDebugLog(), + MaxWorkers: w.MaxWorkers, + ReconcilePeriod: w.ReconcilePeriod, + Selector: w.Selector, + }) + if ctr == nil { + log.Error(fmt.Errorf("failed to add controller for GVK %v", w.GroupVersionKind.String()), "") + os.Exit(1) + } + + cMap.Store(w.GroupVersionKind, &controllermap.Contents{Controller: *ctr, + WatchDependentResources: w.WatchDependentResources, + WatchClusterScopedResources: w.WatchClusterScopedResources, + OwnerWatchMap: controllermap.NewWatchMap(), + AnnotationWatchMap: controllermap.NewWatchMap(), + }, w.Blacklist) + gvks = append(gvks, w.GroupVersionKind) + } + + operatorName, err := k8sutil.GetOperatorName() + if err != nil { + log.Error(err, "Failed to get the operator name") + os.Exit(1) + } + + // Become the leader before proceeding + err = leader.Become(context.TODO(), operatorName+"-lock") + if err != nil { + log.Error(err, "Failed to become leader.") + os.Exit(1) + } + + addMetrics(context.TODO(), cfg, gvks) + err = mgr.AddHealthzCheck("ping", healthz.Ping) + if err != nil { + log.Error(err, "Failed to add Healthz check.") + } + + done := make(chan error) + + // start the proxy + err = 
proxy.Run(done, proxy.Options{ + Address: "localhost", + Port: 8888, + KubeConfig: mgr.GetConfig(), + Cache: mgr.GetCache(), + RESTMapper: mgr.GetRESTMapper(), + ControllerMap: cMap, + OwnerInjection: f.InjectOwnerRef, + WatchedNamespaces: []string{namespace}, + }) + if err != nil { + log.Error(err, "Error starting proxy.") + os.Exit(1) + } + + // start the operator + go func() { + done <- mgr.Start(signals.SetupSignalHandler()) + }() + + // wait for either to finish + err = <-done + if err != nil { + log.Error(err, "Proxy or operator exited with error.") + os.Exit(1) + } + log.Info("Exiting.") +} + +// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using +// the Prometheus operator +func addMetrics(ctx context.Context, cfg *rest.Config, gvks []schema.GroupVersionKind) { + // Get the namespace the operator is currently deployed in. + operatorNs, err := k8sutil.GetOperatorNamespace() + if err != nil { + if errors.Is(err, k8sutil.ErrRunLocal) { + log.Info("Skipping CR metrics server creation; not running in a cluster.") + return + } + } + + if err := serveCRMetrics(cfg, operatorNs, gvks); err != nil { + log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) + } + + // Add to the below struct any other metrics ports you want to expose. + servicePorts := []v1.ServicePort{ + {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, + {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, + } + + // Create Service object to expose the metrics port(s). 
+ service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) + if err != nil { + log.Info("Could not create metrics Service", "error", err.Error()) + return + } + + // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources + // necessary to configure Prometheus to scrape metrics from this operator. + services := []*v1.Service{service} + + // The ServiceMonitor is created in the same namespace where the operator is deployed + _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) + if err != nil { + log.Info("Could not create ServiceMonitor object", "error", err.Error()) + // If this operator is deployed to a cluster without the prometheus-operator running, it will return + // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. + if err == metrics.ErrServiceMonitorNotPresent { + log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) + } + } +} + +// serveCRMetrics takes GVKs retrieved from watches and generates metrics based on those types. +// It serves those metrics on "http://metricsHost:operatorMetricsPort". +func serveCRMetrics(cfg *rest.Config, operatorNs string, gvks []schema.GroupVersionKind) error { + // The metrics will be generated from the namespaces which are returned here. + // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. + ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) + if err != nil { + return err + } + + // Generate and serve custom resource specific metrics. 
+ err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, gvks, metricsHost, operatorMetricsPort) + if err != nil { + return err + } + return nil +} + +// getAnsibleDebugLog return the value from the ANSIBLE_DEBUG_LOGS it order to +// print the full Ansible logs +func getAnsibleDebugLog() bool { + const envVar = "ANSIBLE_DEBUG_LOGS" + val := false + if envVal, ok := os.LookupEnv(envVar); ok { + if i, err := strconv.ParseBool(envVal); err != nil { + log.Info("Could not parse environment variable as an boolean; using default value", + "envVar", envVar, "default", val) + } else { + val = i + } + } else if !ok { + log.Info("Environment variable not set; using default value", "envVar", envVar, + envVar, val) } + return val } diff --git a/cmd/helm-operator/main.go b/cmd/helm-operator/main.go index f76ba5d911..e97bb124ff 100644 --- a/cmd/helm-operator/main.go +++ b/cmd/helm-operator/main.go @@ -15,21 +15,217 @@ package main import ( - log "github.com/sirupsen/logrus" + "context" + "errors" + "fmt" + "os" + "runtime" + "strings" + "github.com/spf13/pflag" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" - "github.com/operator-framework/operator-sdk/pkg/helm" - hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags" + "github.com/operator-framework/operator-sdk/pkg/helm/controller" + "github.com/operator-framework/operator-sdk/pkg/helm/flags" + "github.com/operator-framework/operator-sdk/pkg/helm/release" + "github.com/operator-framework/operator-sdk/pkg/helm/watches" + "github.com/operator-framework/operator-sdk/pkg/k8sutil" + 
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" + "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" + "github.com/operator-framework/operator-sdk/pkg/metrics" + sdkVersion "github.com/operator-framework/operator-sdk/version" +) + +var ( + metricsHost = "0.0.0.0" + metricsPort int32 = 8383 + operatorMetricsPort int32 = 8686 + + log = logf.Log.WithName("cmd") ) +func printVersion() { + log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) + log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) + log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) +} + func main() { - flags := hoflags.AddTo(pflag.CommandLine) + f := flags.Flags{} + f.AddTo(pflag.CommandLine) pflag.Parse() logf.SetLogger(zap.Logger()) - if err := helm.Run(flags); err != nil { - log.Fatal(err) + printVersion() + + cfg, err := config.GetConfig() + if err != nil { + log.Error(err, "Failed to get config.") + os.Exit(1) + } + + // Set default manager options + options := manager.Options{ + MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + NewClient: func(cache cache.Cache, config *rest.Config, options crclient.Options) (crclient.Client, error) { + c, err := crclient.New(config, options) + if err != nil { + return nil, err + } + return &crclient.DelegatingClient{ + Reader: cache, + Writer: c, + StatusClient: c, + }, nil + }, + } + + namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) + log = log.WithValues("Namespace", namespace) + if found { + if namespace == metav1.NamespaceAll { + log.Info("Watching all namespaces.") + options.Namespace = metav1.NamespaceAll + } else { + if strings.Contains(namespace, ",") { + log.Info("Watching multiple namespaces.") + options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) + } else { + log.Info("Watching single namespace.") + options.Namespace = namespace + } + } + } else { + 
log.Info(fmt.Sprintf("%v environment variable not set. Watching all namespaces.", + k8sutil.WatchNamespaceEnvVar)) + options.Namespace = metav1.NamespaceAll + } + + mgr, err := manager.New(cfg, options) + if err != nil { + log.Error(err, "Failed to create a new manager.") + os.Exit(1) + } + + ws, err := watches.Load(f.WatchesFile) + if err != nil { + log.Error(err, "Failed to create new manager factories.") + os.Exit(1) + } + var gvks []schema.GroupVersionKind + for _, w := range ws { + // Register the controller with the factory. + err := controller.Add(mgr, controller.WatchOptions{ + Namespace: namespace, + GVK: w.GroupVersionKind, + ManagerFactory: release.NewManagerFactory(mgr, w.ChartDir), + ReconcilePeriod: f.ReconcilePeriod, + WatchDependentResources: *w.WatchDependentResources, + OverrideValues: w.OverrideValues, + MaxWorkers: f.MaxWorkers, + }) + if err != nil { + log.Error(err, "Failed to add manager factory to controller.") + os.Exit(1) + } + gvks = append(gvks, w.GroupVersionKind) + } + + operatorName, err := k8sutil.GetOperatorName() + if err != nil { + log.Error(err, "Failed to get operator name") + os.Exit(1) + } + + ctx := context.TODO() + + // Become the leader before proceeding + err = leader.Become(ctx, operatorName+"-lock") + if err != nil { + log.Error(err, "Failed to become leader.") + os.Exit(1) + } + + addMetrics(context.TODO(), cfg, gvks) + + // Start the Cmd + if err = mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "Manager exited non-zero.") + os.Exit(1) + } +} + +// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using +// the Prometheus operator +func addMetrics(ctx context.Context, cfg *rest.Config, gvks []schema.GroupVersionKind) { + // Get the namespace the operator is currently deployed in. 
+ operatorNs, err := k8sutil.GetOperatorNamespace() + if err != nil { + if errors.Is(err, k8sutil.ErrRunLocal) { + log.Info("Skipping CR metrics server creation; not running in a cluster.") + return + } + } + + if err := serveCRMetrics(cfg, operatorNs, gvks); err != nil { + log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) + } + + // Add to the below struct any other metrics ports you want to expose. + servicePorts := []v1.ServicePort{ + {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, + {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, + TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, + } + + // Create Service object to expose the metrics port(s). + service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) + if err != nil { + log.Info("Could not create metrics Service", "error", err.Error()) + } + + // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources + // necessary to configure Prometheus to scrape metrics from this operator. + services := []*v1.Service{service} + + // The ServiceMonitor is created in the same namespace where the operator is deployed + _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) + if err != nil { + log.Info("Could not create ServiceMonitor object", "error", err.Error()) + // If this operator is deployed to a cluster without the prometheus-operator running, it will return + // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. + if err == metrics.ErrServiceMonitorNotPresent { + log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) + } + } +} + +// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. 
+// It serves those metrics on "http://metricsHost:operatorMetricsPort". +func serveCRMetrics(cfg *rest.Config, operatorNs string, gvks []schema.GroupVersionKind) error { + // The metrics will be generated from the namespaces which are returned here. + // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. + ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) + if err != nil { + return err + } + + // Generate and serve custom resource specific metrics. + err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, gvks, metricsHost, operatorMetricsPort) + if err != nil { + return err } + return nil } diff --git a/cmd/operator-sdk/alpha/scorecard/cmd.go b/cmd/operator-sdk/alpha/scorecard/cmd.go index 6d58719417..8a7ba4d9ad 100644 --- a/cmd/operator-sdk/alpha/scorecard/cmd.go +++ b/cmd/operator-sdk/alpha/scorecard/cmd.go @@ -86,24 +86,20 @@ If the argument holds an image tag, it must be present remotely.`, return scorecardCmd } -func (c *scorecardCmd) printOutput(output v1alpha3.Test) error { +func (c *scorecardCmd) printOutput(output v1alpha3.TestList) error { switch c.outputFormat { case "text": - if len(output.Status.Results) == 0 { + if len(output.Items) == 0 { fmt.Println("0 tests selected") return nil } - o, err := output.MarshalText() - if err != nil { - fmt.Println(err.Error()) - return err + for _, test := range output.Items { + fmt.Println(test.MarshalText()) } - fmt.Printf("%s\n", o) case "json": bytes, err := json.MarshalIndent(output, "", " ") if err != nil { - fmt.Println(err.Error()) - return err + return fmt.Errorf("marshal json error: %v", err) } fmt.Printf("%s\n", string(bytes)) default: @@ -154,12 +150,9 @@ func (c *scorecardCmd) run() (err error) { return fmt.Errorf("could not parse selector %w", err) } - var scorecardTest v1alpha3.Test + var scorecardTests v1alpha3.TestList if c.list { - scorecardTest, err = o.ListTests() - if err != nil { - return fmt.Errorf("error listing tests %w", err) - } 
+ scorecardTests = o.List() } else { runner := scorecard.PodTestRunner{ ServiceAccount: c.serviceAccount, @@ -178,13 +171,31 @@ func (c *scorecardCmd) run() (err error) { ctx, cancel := context.WithTimeout(context.Background(), c.waitTime) defer cancel() - scorecardTest, err = o.RunTests(ctx) + scorecardTests, err = o.Run(ctx) if err != nil { return fmt.Errorf("error running tests %w", err) } } - return c.printOutput(scorecardTest) + if err := c.printOutput(scorecardTests); err != nil { + log.Fatal(err) + } + + if hasFailingTest(scorecardTests) { + os.Exit(1) + } + return nil +} + +func hasFailingTest(list v1alpha3.TestList) bool { + for _, t := range list.Items { + for _, r := range t.Status.Results { + if r.State != v1alpha3.PassState { + return true + } + } + } + return false } func (c *scorecardCmd) validate(args []string) error { diff --git a/internal/flags/watch/flags.go b/internal/flags/watch/flags.go deleted file mode 100644 index 1d52c58d93..0000000000 --- a/internal/flags/watch/flags.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Operator-SDK Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package watch - -import ( - "strings" - "time" - - "github.com/spf13/pflag" -) - -// todo(camilamacedo86):the WatchFlags should be renamed for the release 1.0.0 -// WatchFlags provides flag for configuration of a controller's reconcile period and for a -// watches.yaml file, which is used to configure dynamic operators (e.g. 
Ansible and Helm). -type WatchFlags struct { //nolint:golint - /* - The nolint is regards to: type name will be used as watch.WatchFlags by other packages, and that stutters; - consider calling this Flags (golint) - todo(camilamacedo86): Note that we decided to not introduce breakchanges to add the linters - and it should be done after. - From @joelanford: Even though watch.WatchFlags is an internal type, it is embedded in exported types, - which means that changing it to watch.Flags is a breaking change. - */ - - ReconcilePeriod time.Duration - WatchesFile string -} - -// AddTo - Add the reconcile period and watches file flags to the the flagset -// helpTextPrefix will allow you add a prefix to default help text. Joined by a space. -func (f *WatchFlags) AddTo(flagSet *pflag.FlagSet, helpTextPrefix ...string) { - flagSet.DurationVar(&f.ReconcilePeriod, - "reconcile-period", - time.Minute, - strings.Join(append(helpTextPrefix, "Default reconcile period for controllers"), " "), - ) - flagSet.StringVar(&f.WatchesFile, - "watches-file", - "./watches.yaml", - strings.Join(append(helpTextPrefix, "Path to the watches file to use"), " "), - ) -} diff --git a/internal/olm/operator/internal/registry_pod.go b/internal/olm/operator/internal/registry_pod.go new file mode 100644 index 0000000000..065f4ec0be --- /dev/null +++ b/internal/olm/operator/internal/registry_pod.go @@ -0,0 +1,321 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package olm + +import ( + "bytes" + "context" + "errors" + "fmt" + "path" + "strings" + "text/template" + "time" + + "github.com/operator-framework/operator-sdk/internal/flags" + "github.com/operator-framework/operator-sdk/internal/util/k8sutil" + + "github.com/spf13/viper" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +// BundleAddModeType - type of BundleAddMode in RegistryPod struct +type BundleAddModeType = string + +const ( + // SemverBundleAddMode - bundle add mode for semver + SemverBundleAddMode BundleAddModeType = "semver" + // ReplacesBundleAddMode - bundle add mode for replaces + ReplacesBundleAddMode BundleAddModeType = "replaces" +) +const ( + defaultIndexImage = "quay.io/operator-framework/upstream-opm-builder:latest" + defaultContainerName = "registry-grpc" + defaultContainerPortName = "grpc" + defaultGRPCPort = 50051 +) + +// RegistryPod holds resources necessary for creation of a registry server +type RegistryPod struct { + // BundleAddMode specifies the graph update mode that defines how channel graphs are updated + // It is of the type BundleAddModeType + BundleAddMode BundleAddModeType + + // BundleImage specifies the container image that opm uses to generate and incrementally update the database + BundleImage string + + // Index image contains a database of pointers to operator manifest content that is queriable via an API. 
+ // new version of an operator bundle when published can be added to an index image + IndexImage string + + // DBPath refers to the registry DB; + // if an index image is provided, the existing registry DB is located at /database/index.db + DBPath string + + // Namespace refers to the specific namespace in which the registry pod will be created and scoped to + Namespace string + + // Kubeclient refers to a Kubernetes clientset that implements kubernetes.Interface. + Kubeclient kubernetes.Interface + + // GRPCPort is the container grpc port which is defaulted to 50051 + GRPCPort int32 + + // pod represents a kubernetes *corev1.pod that will be created on a cluster using an index image + pod *corev1.Pod +} + +// NewRegistryPod initializes the RegistryPod struct and sets defaults for empty fields +func NewRegistryPod(kubeclient kubernetes.Interface, dbPath, bundleImage, namespace string) (*RegistryPod, error) { + rp := &RegistryPod{} + + if rp.GRPCPort == 0 { + rp.GRPCPort = defaultGRPCPort + } + + if len(strings.TrimSpace(rp.IndexImage)) < 1 { + rp.IndexImage = defaultIndexImage + } + + if len(strings.TrimSpace(rp.BundleAddMode)) < 1 { + if rp.IndexImage == defaultIndexImage { + rp.BundleAddMode = SemverBundleAddMode + } else { + rp.BundleAddMode = ReplacesBundleAddMode + } + } + + rp.Kubeclient = kubeclient + rp.DBPath = dbPath + rp.BundleImage = bundleImage + rp.Namespace = namespace + + // validate the RegistryPod struct and ensure required fields are set + if err := rp.validate(); err != nil { + return nil, fmt.Errorf("error in validating registry pod struct: %v", err) + } + + // call podForBundleRegistry() to make the pod definition + pod, err := rp.podForBundleRegistry() + if err != nil { + return nil, fmt.Errorf("error in building registry pod definition: %v", err) + } + rp.pod = pod + + return rp, nil +} + +// Create creates a bundle registry pod built from an index image +// and returns error +func (rp *RegistryPod) Create(ctx context.Context) error { + if 
rp.pod == nil { + return errors.New("internal error: uninitialized RegistryPod cannot be used") + } + var ( + pod *corev1.Pod + err error + ) + + // Check if registry pod already exists + if pod, err = rp.Kubeclient.CoreV1().Pods(rp.pod.Namespace).Get(ctx, + rp.pod.Name, metav1.GetOptions{}); err != nil { + // if error exists and the error is due to pod not found, then create a new pod + if k8serrors.IsNotFound(err) { + // create registry pod in kubernetes cluster + if pod, err = rp.Kubeclient.CoreV1().Pods(rp.pod.Namespace).Create(ctx, + rp.pod, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("error creating registry pod: %v", err) + } + // assign rp.pod to the newly created pod + rp.pod = pod + } else { + return fmt.Errorf("error getting existing registry pod: %v", err) + } + } else { + // if an existing pod matching rp.pod.Name is found, assign rp.pod to the existing pod + rp.pod = pod + } + + return nil +} + +// VerifyPodRunning calls checkPodStatus to verify pod status +// and returns error if pod is not running +func (rp *RegistryPod) VerifyPodRunning(ctx context.Context) error { + // upon creation of new pod, poll and verify that pod status is running + podCheck := wait.ConditionFunc(func() (done bool, err error) { + p, err := rp.Kubeclient.CoreV1().Pods(rp.pod.Namespace).Get(ctx, + rp.pod.Name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error getting pod %s: %w", rp.pod.Name, err) + } + return p.Status.Phase == corev1.PodRunning, nil + }) + + // check pod status to be Running + if err := rp.checkPodStatus(ctx, podCheck); err != nil { + podLogs, logErr := rp.GetLogs(ctx) + if logErr != nil { + return fmt.Errorf("error verifying pod creation: %v: and fetching logs: %v", err, logErr) + } + if viper.GetBool(flags.VerboseOpt) && podLogs != "" { + fmt.Println(podLogs) + } + return fmt.Errorf("registry pod did not become ready: %w", err) + } + return nil +} + +// checkPodStatus polls and verifies that the pod status is running 
+func (rp *RegistryPod) checkPodStatus(ctx context.Context, podCheck wait.ConditionFunc) error { + // poll every 200 ms until podCheck is true or context is done + err := wait.PollImmediateUntil(time.Duration(200*time.Millisecond), + podCheck, ctx.Done()) + if err != nil { + return fmt.Errorf("error waiting for registry pod %s to run: %v", rp.pod.Name, err) + } + + return err +} + +// validate will ensure that RegistryPod required fields are set +// and throws error if not set +func (rp *RegistryPod) validate() error { + if len(strings.TrimSpace(rp.BundleImage)) < 1 { + return errors.New("bundle image cannot be empty") + } + if len(strings.TrimSpace(rp.DBPath)) < 1 { + return errors.New("registry database path cannot be empty") + } + + if len(strings.TrimSpace(rp.Namespace)) < 1 { + return errors.New("pod namespace cannot be empty") + } + + if len(strings.TrimSpace(rp.BundleAddMode)) < 1 { + return errors.New("bundle add mode cannot be empty") + } + + if rp.BundleAddMode != SemverBundleAddMode && rp.BundleAddMode != ReplacesBundleAddMode { + return fmt.Errorf("invalid bundle mode %q: must be one of [%q, %q]", + rp.BundleAddMode, ReplacesBundleAddMode, SemverBundleAddMode) + } + + return nil +} + +// getPodName will return a string constructed from the bundle Image name +func getPodName(bundleImage string) string { + // todo(rashmigottipati): need to come up with human-readable references + // to be able to handle SHA references in the bundle images + return k8sutil.TrimDNS1123Label(k8sutil.FormatOperatorNameDNS1123(bundleImage)) +} + +// podForBundleRegistry constructs and returns the registry pod definition +// and throws error when unable to build the pod definition successfully +func (rp *RegistryPod) podForBundleRegistry() (*corev1.Pod, error) { + // construct the container command for pod spec + containerCmd, err := rp.getContainerCmd() + if err != nil { + return nil, fmt.Errorf("error in parsing container command: %v", err) + } + + // make the pod definition 
+ pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: getPodName(rp.BundleImage), + Namespace: rp.Namespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: defaultContainerName, + Image: rp.IndexImage, + Command: []string{ + "/bin/sh", + "-c", + containerCmd, + }, + Ports: []corev1.ContainerPort{ + {Name: defaultContainerPortName, ContainerPort: rp.GRPCPort}, + }, + }, + }, + }, + } + + return pod, nil +} + +// getContainerCmd uses templating to construct the container command +// and throws error if unable to parse and execute the container command +func (rp *RegistryPod) getContainerCmd() (string, error) { + const containerCommand = "/bin/mkdir -p {{ .DBPath | basename }} &&" + + "/bin/opm registry add -d {{ .DBPath | basename }} -b {{.BundleImage}} --mode={{.BundleAddMode}} &&" + + "/bin/opm registry serve -d {{ .DBPath | basename }} -p {{.GRPCPort}}" + type bundleCmd struct { + BundleImage, DBPath, BundleAddMode string + GRPCPort int32 + } + + var command = bundleCmd{rp.BundleImage, rp.DBPath, + rp.BundleAddMode, rp.GRPCPort} + + out := &bytes.Buffer{} + + // create a custom basename template function + funcMap := template.FuncMap{ + "basename": path.Base, + } + + // add the custom basename template function to the + // template's FuncMap and parse the containerCommand + tmp := template.Must(template.New("containerCommand").Funcs(funcMap).Parse(containerCommand)) + + // execute the command by applying the parsed tmp to command + // and write command output to out + if err := tmp.Execute(out, command); err != nil { + return "", fmt.Errorf("error in parsing container command: %w", err) + } + + return out.String(), nil +} + +// GetLogs gets the logs for the registry pod +// and throws error if failed to get pod logs +func (rp *RegistryPod) GetLogs(ctx context.Context) (string, error) { + if rp.pod == nil { + return "", errors.New("a registry pod must be created before getting pod logs") + } + + // get the logs of rp.pod.Name + 
req := rp.Kubeclient.CoreV1().Pods(rp.pod.Namespace).GetLogs(rp.pod.Name, &corev1.PodLogOptions{}) + podLogs, err := req.Stream(ctx) + if err != nil { + return "", fmt.Errorf("failed to get logs: %v", err) + } + defer podLogs.Close() + + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(podLogs) + if err != nil { + return "", fmt.Errorf("failed to read pod logs: %v", err) + } + return buf.String(), nil +} diff --git a/internal/olm/operator/internal/registry_pod_test.go b/internal/olm/operator/internal/registry_pod_test.go new file mode 100644 index 0000000000..b93b1a38f6 --- /dev/null +++ b/internal/olm/operator/internal/registry_pod_test.go @@ -0,0 +1,210 @@ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and + +package olm + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +// newFakeClient() returns a clientset +func newFakeClient() kubernetes.Interface { + return fake.NewSimpleClientset() +} +func TestCreateRegistryPod(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test Registry Pod Suite") +} + +var _ = Describe("RegistryPod", func() { + + Describe("creating registry pod", func() { + + Context("with valid registry pod values", func() { + expectedPodName := "quay-io-example-example-operator-bundle-0-2-0" + expectedOutput := "/bin/mkdir -p index.db &&" + + "/bin/opm registry add -d index.db -b quay.io/example/example-operator-bundle:0.2.0 --mode=semver &&" + + "/bin/opm registry serve -d index.db -p 50051" + + var rp *RegistryPod + var err error + + BeforeEach(func() { + rp, err = NewRegistryPod(newFakeClient(), "/database/index.db", "quay.io/example/example-operator-bundle:0.2.0", "default") + Expect(err).To(BeNil()) + }) + + It("should validate the RegistryPod successfully", func() { + err := rp.validate() + + Expect(err).To(BeNil()) + }) + + It("should create the RegistryPod successfully", func() { + Expect(rp).NotTo(BeNil()) + Expect(rp.pod.Name).To(Equal(expectedPodName)) + Expect(rp.pod.Namespace).To(Equal(rp.Namespace)) + Expect(rp.pod.Spec.Containers[0].Name).To(Equal(defaultContainerName)) + if len(rp.pod.Spec.Containers) > 0 { + if len(rp.pod.Spec.Containers[0].Ports) > 0 { + Expect(rp.pod.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(rp.GRPCPort)) + } + } + }) + + It("should return a valid container command", func() { + output, err := rp.getContainerCmd() + + Expect(err).To(BeNil()) + Expect(output).Should(Equal(expectedOutput)) + }) + + It("should return a pod definition successfully", func() { + rp.pod, err = rp.podForBundleRegistry() + + Expect(rp.pod).NotTo(BeNil()) + Expect(rp.pod.Name).To(Equal(expectedPodName)) + Expect(rp.pod.Namespace).To(Equal(rp.Namespace)) + 
Expect(rp.pod.Spec.Containers[0].Name).To(Equal(defaultContainerName)) + if len(rp.pod.Spec.Containers) > 0 { + if len(rp.pod.Spec.Containers[0].Ports) > 0 { + Expect(rp.pod.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(rp.GRPCPort)) + } + } + }) + + It("should create registry pod successfully", func() { + err := rp.Create(context.Background()) + + Expect(err).To(BeNil()) + }) + + It("check pod status should return successfully when pod check is true", func() { + mockGoodPodCheck := wait.ConditionFunc(func() (done bool, err error) { + return true, nil + }) + + err := rp.checkPodStatus(context.Background(), mockGoodPodCheck) + + Expect(err).To(BeNil()) + }) + }) + + Context("with invalid registry pod values", func() { + + It("should error when bundle image is not provided", func() { + expectedErr := "bundle image cannot be empty" + + _, err := NewRegistryPod(newFakeClient(), "/database/index.db", + "", "default") + + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("should not create a registry pod when namespace is not provided", func() { + expectedErr := "namespace cannot be empty" + + _, err := NewRegistryPod(newFakeClient(), "/database/index.db", + "quay.io/example/example-operator-bundle:0.2.0", "") + + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("should not create a registry pod when database path is not provided", func() { + expectedErr := "registry database path cannot be empty" + + _, err := NewRegistryPod(newFakeClient(), "", + "quay.io/example/example-operator-bundle:0.2.0", "default") + + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("should not create a registry pod when bundle add mode is empty", func() { + expectedErr := "bundle add mode cannot be empty" + + rp, _ := NewRegistryPod(newFakeClient(), "/database/index.db", + "quay.io/example/example-operator-bundle:0.2.0", "default") + 
rp.BundleAddMode = "" + + err := rp.validate() + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("should not accept any other bundle add mode other than semver or replaces", func() { + expectedErr := "invalid bundle mode" + + rp, _ := NewRegistryPod(newFakeClient(), "/database/index.db", + "quay.io/example/example-operator-bundle:0.2.0", "default") + rp.BundleAddMode = "invalid" + + err := rp.validate() + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("checkPodStatus should return error when pod check is false and context is done", func() { + rp, _ := NewRegistryPod(newFakeClient(), "/database/index.db", + "quay.io/example/example-operator-bundle:0.2.0", "default") + + mockBadPodCheck := wait.ConditionFunc(func() (done bool, err error) { + return false, fmt.Errorf("error waiting for registry pod") + }) + + expectedErr := "error waiting for registry pod" + // create a new context with a deadline of 1 millisecond + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + cancel() + + err := rp.checkPodStatus(ctx, mockBadPodCheck) + + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("Create should fail when registry pod is not initialized", func() { + rp := RegistryPod{} + expectedErr := "internal error: uninitialized RegistryPod cannot be used" + + err := rp.Create(context.Background()) + + Expect(err).NotTo(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + It("should not be able to get pod logs if pod is not initialized", func() { + rp := RegistryPod{} + expectedErr := "a registry pod must be created before getting pod logs" + + _, err := rp.GetLogs(context.Background()) + + Expect(err).ToNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring(expectedErr)) + }) + + // todo(rashmigottipati): add test to check VerifyPodRunning returning error + }) + }) +}) diff 
--git a/internal/registry/labels.go b/internal/registry/labels.go index ed03778963..b51ec23156 100644 --- a/internal/registry/labels.go +++ b/internal/registry/labels.go @@ -27,7 +27,7 @@ import ( registrybundle "github.com/operator-framework/operator-registry/pkg/lib/bundle" log "github.com/sirupsen/logrus" - // TODO: replace `gopkg.in/yaml.v2` with `sigs.k8s.io/yaml` once operator-registry has `json` tags in the + // TODO: replace `gopkg.in/yaml.v3` with `sigs.k8s.io/yaml` once operator-registry has `json` tags in the // annotations struct. yaml "gopkg.in/yaml.v3" ) diff --git a/internal/scorecard/alpha/config.go b/internal/scorecard/alpha/config.go index 254de68be7..2eef8a6045 100644 --- a/internal/scorecard/alpha/config.go +++ b/internal/scorecard/alpha/config.go @@ -25,19 +25,24 @@ const ( ConfigDirPath = "/tests/" + ConfigDirName + "/" ) +type Stage struct { + Parallel bool `yaml:"parallel"` + Tests []Test `yaml:"tests"` +} + type Test struct { - Name string `yaml:"name"` // The container test name - Image string `yaml:"image"` // The container image name - // An list of commands and arguments passed to the test image - Entrypoint []string `yaml:"entrypoint,omitempty"` - Labels map[string]string `yaml:"labels"` // User defined labels used to filter tests - Description string `yaml:"description"` // User readable test description + // Image is the name of the testimage + Image string `json:"image"` + // Entrypoint is list of commands and arguments passed to the test image + Entrypoint []string `json:"entrypoint,omitempty"` + // Labels that further describe the test and enable selection + Labels map[string]string `json:"labels,omitempty"` } // Config represents the set of test configurations which scorecard // would run based on user input type Config struct { - Tests []Test `yaml:"tests"` + Stages []Stage `yaml:"stages"` } // LoadConfig will find and return the scorecard config, the config file diff --git 
a/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml b/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml index f5fbe5da9c..103a2ec1b4 100644 --- a/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml +++ b/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml @@ -1,19 +1,16 @@ -tests: -- name: "customtest1" - image: quay.io/username/custom-scorecard-tests:dev - entrypoint: - - custom-scorecard-tests - - customtest1 - labels: - suite: custom - test: customtest1 - description: an ISV custom test -- name: "customtest2" - entrypoint: - - custom-scorecard-tests - - customtest2 - image: quay.io/username/custom-scorecard-tests:dev - labels: - suite: custom - test: customtest2 - description: an ISV custom test \ No newline at end of file +stages: +- tests: + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest1 + labels: + suite: custom + test: customtest1 + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest2 + labels: + suite: custom + test: customtest2 diff --git a/internal/scorecard/alpha/formatting.go b/internal/scorecard/alpha/formatting.go index 67c2eca737..389a3698a6 100644 --- a/internal/scorecard/alpha/formatting.go +++ b/internal/scorecard/alpha/formatting.go @@ -18,47 +18,41 @@ import ( "context" "encoding/json" - "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" v1 "k8s.io/api/core/v1" + + "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" ) // getTestResult fetches the test pod log and converts it into // Test format -func (r PodTestRunner) getTestStatus(ctx context.Context, p *v1.Pod, test Test) (output *v1alpha3.TestStatus) { - +func (r PodTestRunner) getTestStatus(ctx context.Context, p *v1.Pod) (output *v1alpha3.TestStatus) { logBytes, err := 
getPodLog(ctx, r.Client, p) if err != nil { - return testStatusError(err, test) + return convertErrorToStatus(err, string(logBytes)) } // marshal pod log into TestResult err = json.Unmarshal(logBytes, &output) if err != nil { - return testStatusError(err, test) + return convertErrorToStatus(err, string(logBytes)) } return output } -// ListTests lists the scorecard tests as configured that would be +// List lists the scorecard tests as configured that would be // run based on user selection -func (o Scorecard) ListTests() (output v1alpha3.Test, err error) { - tests := o.selectTests() - if len(tests) == 0 { - return output, err - } - - for _, test := range tests { - output.Status.Results = append(output.Status.Results, v1alpha3.TestResult{Name: test.Name}) - } - - return output, err -} - -func testStatusError(err error, test Test) *v1alpha3.TestStatus { - r := v1alpha3.TestResult{} - r.Name = test.Name - r.State = v1alpha3.FailState - r.Errors = []string{err.Error()} - return &v1alpha3.TestStatus{ - Results: []v1alpha3.TestResult{r}, +func (o Scorecard) List() v1alpha3.TestList { + output := v1alpha3.NewTestList() + for _, stage := range o.Config.Stages { + tests := o.selectTests(stage) + for _, test := range tests { + item := v1alpha3.NewTest() + item.Spec = v1alpha3.TestSpec{ + Image: test.Image, + Entrypoint: test.Entrypoint, + Labels: test.Labels, + } + output.Items = append(output.Items, item) + } } + return output } diff --git a/internal/scorecard/alpha/formatting_test.go b/internal/scorecard/alpha/formatting_test.go index 9a91f4d93b..02b482987e 100644 --- a/internal/scorecard/alpha/formatting_test.go +++ b/internal/scorecard/alpha/formatting_test.go @@ -18,7 +18,6 @@ import ( "path/filepath" "testing" - "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" "k8s.io/apimachinery/pkg/labels" ) @@ -27,10 +26,9 @@ func TestList(t *testing.T) { cases := []struct { bundlePathValue string selector string - wantError bool resultCount int }{ - 
{"testdata/bundle", "suite=basic", false, 1}, + {"testdata/bundle", "suite=basic", 1}, } for _, c := range cases { @@ -50,17 +48,8 @@ func TestList(t *testing.T) { } runner.BundlePath = c.bundlePathValue o.TestRunner = &runner - var output v1alpha3.Test - output, err = o.ListTests() - if err == nil && c.wantError { - t.Fatalf("Wanted error but got no error") - } else if err != nil { - if !c.wantError { - t.Fatalf("Wanted result but got error: %v", err) - } - return - } - actualResultCount := len(output.Status.Results) + output := o.List() + actualResultCount := len(output.Items) if c.resultCount != actualResultCount { t.Fatalf("Wanted result count %d but got : %d", c.resultCount, actualResultCount) } diff --git a/internal/scorecard/alpha/labels_test.go b/internal/scorecard/alpha/labels_test.go index eace28bf57..8bd4d2d605 100644 --- a/internal/scorecard/alpha/labels_test.go +++ b/internal/scorecard/alpha/labels_test.go @@ -55,7 +55,7 @@ func TestEmptySelector(t *testing.T) { return } - tests := o.selectTests() + tests := o.selectTests(o.Config.Stages[0]) testsSelected := len(tests) if testsSelected != c.testsSelected { t.Errorf("Wanted testsSelected %d, got: %d", c.testsSelected, testsSelected) @@ -65,65 +65,52 @@ func TestEmptySelector(t *testing.T) { } } -const testConfig = `tests: -- name: "customtest1" - image: quay.io/someuser/customtest1:v0.0.1 - entrypoint: - - custom-test - labels: - suite: custom - test: customtest1 - description: an ISV custom test that does... -- name: "customtest2" - image: quay.io/someuser/customtest2:v0.0.1 - entrypoint: - - custom-test - labels: - suite: custom - test: customtest2 - description: an ISV custom test that does... 
-- name: "basic-check-spec" - image: quay.io/redhat/basictests:v0.0.1 - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test - description: check the spec test -- name: "basic-check-status" - image: quay.io/redhat/basictests:v0.0.1 - entrypoint: - - scorecard-test - - basic-check-status - labels: - suite: basic - test: basic-check-status-test - description: check the status test -- name: "olm-bundle-validation" - image: quay.io/redhat/olmtests:v0.0.1 - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test - description: validate the bundle test -- name: "olm-crds-have-validation" - image: quay.io/redhat/olmtests:v0.0.1 - entrypoint: - - scorecard-test - - olm-crds-have-validation - labels: - suite: olm - test: olm-crds-have-validation-test - description: CRDs have validation -- name: "kuttl-tests" - image: quay.io/redhat/kuttltests:v0.0.1 - labels: - suite: kuttl - entrypoint: - - kuttl-test - - olm-status-descriptors - description: Kuttl tests +const testConfig = `stages: +- tests: + - image: quay.io/someuser/customtest1:v0.0.1 + entrypoint: + - custom-test + labels: + suite: custom + test: customtest1 + - image: quay.io/someuser/customtest2:v0.0.1 + entrypoint: + - custom-test + labels: + suite: custom + test: customtest2 + - image: quay.io/redhat/basictests:v0.0.1 + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/redhat/basictests:v0.0.1 + entrypoint: + - scorecard-test + - basic-check-status + labels: + suite: basic + test: basic-check-status-test + - image: quay.io/redhat/olmtests:v0.0.1 + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test + - image: quay.io/redhat/olmtests:v0.0.1 + entrypoint: + - scorecard-test + - olm-crds-have-validation + labels: + suite: olm + test: olm-crds-have-validation-test + - 
image: quay.io/redhat/kuttltests:v0.0.1 + labels: + suite: kuttl + entrypoint: + - kuttl-test + - olm-status-descriptors ` diff --git a/internal/scorecard/alpha/run_test.go b/internal/scorecard/alpha/run_test.go index 84e5928d0f..2add2b63ba 100644 --- a/internal/scorecard/alpha/run_test.go +++ b/internal/scorecard/alpha/run_test.go @@ -20,12 +20,13 @@ import ( "testing" "time" - "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" "k8s.io/apimachinery/pkg/labels" + + "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" ) +// TODO(joelanford): rewrite to use ginkgo/gomega func TestRunTests(t *testing.T) { - cases := []struct { name string configPathValue string @@ -79,11 +80,11 @@ func TestRunTests(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(7*time.Second)) defer cancel() - var scorecardOutput v1alpha3.Test - scorecardOutput, err = o.RunTests(ctx) + var scorecardOutput v1alpha3.TestList + scorecardOutput, err = o.Run(ctx) - if scorecardOutput.Status.Results[0].State != c.expectedState { - t.Fatalf("Wanted state %v, got %v", c.expectedState, scorecardOutput.Status.Results[0].State) + if scorecardOutput.Items[0].Status.Results[0].State != c.expectedState { + t.Fatalf("Wanted state %v, got %v", c.expectedState, scorecardOutput.Items[0].Status.Results[0].State) } if err == nil && c.wantError { @@ -99,3 +100,112 @@ func TestRunTests(t *testing.T) { } } + +// TODO(joelanford): rewrite to use ginkgo/gomega +func TestRunParallelPass(t *testing.T) { + scorecard := getFakeScorecard(true) + ctx, cancel := context.WithTimeout(context.Background(), 7*time.Millisecond) + defer cancel() + + tests, err := scorecard.Run(ctx) + if err != nil { + t.Fatalf("Expected no error, got error: %v", err) + } + if len(tests.Items) != 2 { + t.Fatalf("Expected 2 tests, got %d", len(tests.Items)) + } + for _, test := range tests.Items { + expectPass(t, test) + } +} + +// TODO(joelanford): rewrite to use 
ginkgo/gomega +func TestRunSequentialPass(t *testing.T) { + scorecard := getFakeScorecard(false) + ctx, cancel := context.WithTimeout(context.Background(), 12*time.Millisecond) + defer cancel() + + tests, err := scorecard.Run(ctx) + if err != nil { + t.Fatalf("Expected no error, got error: %v", err) + } + if len(tests.Items) != 2 { + t.Fatalf("Expected 2 tests, got %d", len(tests.Items)) + } + for _, test := range tests.Items { + expectPass(t, test) + } +} + +// TODO(joelanford): rewrite to use ginkgo/gomega +func TestRunSequentialFail(t *testing.T) { + scorecard := getFakeScorecard(false) + + ctx, cancel := context.WithTimeout(context.Background(), 7*time.Millisecond) + defer cancel() + + tests, err := scorecard.Run(ctx) + if err != nil { + t.Fatalf("Expected no error, got error: %v", err) + } + if len(tests.Items) != 2 { + t.Fatalf("Expected 2 tests, got %d", len(tests.Items)) + } + + expectPass(t, tests.Items[0]) + expectDeadlineExceeded(t, tests.Items[1]) +} + +func getFakeScorecard(parallel bool) Scorecard { + return Scorecard{ + Config: Config{ + Stages: []Stage{ + { + Parallel: parallel, + Tests: []Test{ + {}, + {}, + }, + }, + }, + }, + TestRunner: FakeTestRunner{ + Sleep: 5 * time.Millisecond, + TestStatus: &v1alpha3.TestStatus{ + Results: []v1alpha3.TestResult{ + { + State: v1alpha3.PassState, + }, + }, + }, + }, + } +} + +func expectPass(t *testing.T, test v1alpha3.Test) { + if len(test.Status.Results) != 1 { + t.Fatalf("Expected 1 results, got %d", len(test.Status.Results)) + } + for _, r := range test.Status.Results { + if len(r.Errors) > 0 { + t.Fatalf("Expected no errors, got %v", r.Errors) + } + if r.State != v1alpha3.PassState { + t.Fatalf("Expected result state %q, got %q", v1alpha3.PassState, r.State) + } + } +} + +func expectDeadlineExceeded(t *testing.T, test v1alpha3.Test) { + if len(test.Status.Results) != 1 { + t.Fatalf("Expected 1 results, got %d", len(test.Status.Results)) + } + for _, r := range test.Status.Results { + if len(r.Errors) != 
1 || r.Errors[0] != context.DeadlineExceeded.Error() { + t.Fatalf("Expected error %q error, got %v", context.DeadlineExceeded, r.Errors) + } + if r.State != v1alpha3.FailState { + t.Fatalf("Expected result state %q, got %q", v1alpha3.FailState, r.State) + } + } +} diff --git a/internal/scorecard/alpha/scorecard.go b/internal/scorecard/alpha/scorecard.go index 2813fa2282..38274332b5 100644 --- a/internal/scorecard/alpha/scorecard.go +++ b/internal/scorecard/alpha/scorecard.go @@ -17,6 +17,7 @@ package alpha import ( "context" "fmt" + "sync" "time" v1 "k8s.io/api/core/v1" @@ -53,48 +54,86 @@ type PodTestRunner struct { } type FakeTestRunner struct { + Sleep time.Duration TestStatus *v1alpha3.TestStatus Error error } -// RunTests executes the scorecard tests as configured -func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) { +// Run executes the scorecard tests as configured +func (o Scorecard) Run(ctx context.Context) (v1alpha3.TestList, error) { + testOutput := v1alpha3.NewTestList() - err = o.TestRunner.Initialize(ctx) - if err != nil { + if err := o.TestRunner.Initialize(ctx); err != nil { return testOutput, err } - tests := o.selectTests() - if len(tests) == 0 { - return testOutput, nil - } + for _, stage := range o.Config.Stages { + tests := o.selectTests(stage) + if len(tests) == 0 { + continue + } - for _, test := range tests { - result, err := o.TestRunner.RunTest(ctx, test) - if err != nil { - result = convertErrorToStatus(test.Name, err) + output := make(chan v1alpha3.Test, len(tests)) + if stage.Parallel { + o.runStageParallel(ctx, tests, output) + } else { + o.runStageSequential(ctx, tests, output) + } + close(output) + for o := range output { + testOutput.Items = append(testOutput.Items, o) } - testOutput.Status.Results = append(testOutput.Status.Results, result.Results...) 
} if !o.SkipCleanup { - err = o.TestRunner.Cleanup(ctx) - if err != nil { + if err := o.TestRunner.Cleanup(ctx); err != nil { return testOutput, err + } } return testOutput, nil } +func (o Scorecard) runStageParallel(ctx context.Context, tests []Test, results chan<- v1alpha3.Test) { + var wg sync.WaitGroup + for _, t := range tests { + wg.Add(1) + go func(test Test) { + results <- o.runTest(ctx, test) + wg.Done() + }(t) + } + wg.Wait() +} + +func (o Scorecard) runStageSequential(ctx context.Context, tests []Test, results chan<- v1alpha3.Test) { + for _, test := range tests { + results <- o.runTest(ctx, test) + } +} + +func (o Scorecard) runTest(ctx context.Context, test Test) v1alpha3.Test { + result, err := o.TestRunner.RunTest(ctx, test) + if err != nil { + result = convertErrorToStatus(err, "") + } + + out := v1alpha3.NewTest() + out.Spec = v1alpha3.TestSpec{ + Image: test.Image, + Entrypoint: test.Entrypoint, + Labels: test.Labels, + } + out.Status = *result + return out +} + // selectTests applies an optionally passed selector expression // against the configured set of tests, returning the selected tests -func (o Scorecard) selectTests() []Test { - +func (o *Scorecard) selectTests(stage Stage) []Test { selected := make([]Test, 0) - - for _, test := range o.Config.Tests { - if o.Selector.String() == "" || o.Selector.Matches(labels.Set(test.Labels)) { + for _, test := range stage.Tests { + if o.Selector == nil || o.Selector.String() == "" || o.Selector.Matches(labels.Set(test.Labels)) { // TODO olm manifests check selected = append(selected, test) } @@ -102,8 +141,13 @@ func (o Scorecard) selectTests() []Test { return selected } -func (r FakeTestRunner) Initialize(ctx context.Context) (err error) { - return nil +func (r FakeTestRunner) Initialize(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } } // Initialize sets up the bundle configmap for tests @@ -121,8 +165,13 @@ func (r *PodTestRunner) 
Initialize(ctx context.Context) error { } -func (r FakeTestRunner) Cleanup(ctx context.Context) (err error) { - return nil +func (r FakeTestRunner) Cleanup(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + return nil + } } // Cleanup deletes pods and configmap resources from this test run @@ -139,27 +188,30 @@ func (r PodTestRunner) Cleanup(ctx context.Context) (err error) { } // RunTest executes a single test -func (r PodTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) { - +func (r PodTestRunner) RunTest(ctx context.Context, test Test) (*v1alpha3.TestStatus, error) { // Create a Pod to run the test podDef := getPodDefinition(r.configMapName, test, r) pod, err := r.Client.CoreV1().Pods(r.Namespace).Create(ctx, podDef, metav1.CreateOptions{}) if err != nil { - return result, err + return nil, err } err = r.waitForTestToComplete(ctx, pod) if err != nil { - return result, err + return nil, err } - result = r.getTestStatus(ctx, pod, test) - return result, nil + return r.getTestStatus(ctx, pod), nil } // RunTest executes a single test func (r FakeTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) { - return r.TestStatus, r.Error + select { + case <-time.After(r.Sleep): + return r.TestStatus, r.Error + case <-ctx.Done(): + return nil, ctx.Err() + } } func ConfigDocLink() string { @@ -182,17 +234,16 @@ func (r PodTestRunner) waitForTestToComplete(ctx context.Context, p *v1.Pod) (er return false, nil }) - err = wait.PollImmediateUntil(time.Duration(1*time.Second), podCheck, ctx.Done()) + err = wait.PollImmediateUntil(1*time.Second, podCheck, ctx.Done()) return err } -func convertErrorToStatus(name string, err error) *v1alpha3.TestStatus { +func convertErrorToStatus(err error, log string) *v1alpha3.TestStatus { result := v1alpha3.TestResult{} - result.Name = name - result.Errors = []string{err.Error()} - result.Suggestions = []string{} 
result.State = v1alpha3.FailState + result.Errors = []string{err.Error()} + result.Log = log return &v1alpha3.TestStatus{ Results: []v1alpha3.TestResult{result}, } diff --git a/internal/scorecard/alpha/tar.go b/internal/scorecard/alpha/tar.go index fbd836dcd1..2e7dcae60a 100644 --- a/internal/scorecard/alpha/tar.go +++ b/internal/scorecard/alpha/tar.go @@ -194,7 +194,7 @@ func newTarDirHeader(path string) *tar.Header { Typeflag: tar.TypeDir, Name: filepath.Clean(path) + "/", ModTime: time.Now(), - Mode: 0700, + Mode: 0755, Uid: os.Getuid(), Gid: os.Getgid(), } diff --git a/internal/scorecard/alpha/testdata/bundle.tar.gz b/internal/scorecard/alpha/testdata/bundle.tar.gz index 4f569ab3c1..5e0933ba70 100644 Binary files a/internal/scorecard/alpha/testdata/bundle.tar.gz and b/internal/scorecard/alpha/testdata/bundle.tar.gz differ diff --git a/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml b/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml index b56656a08c..4831301bbc 100644 --- a/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml +++ b/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml @@ -1,55 +1,45 @@ -tests: -- name: "basic-check-spec" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test - description: check the spec test -- name: "olm-bundle-validation" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test - description: validate the bundle test -- name: "olm-crds-have-validation" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-crds-have-validation - labels: - suite: olm - test: olm-crds-have-validation-test - description: CRDs have validation -- name: "olm-crds-have-resources" - image: 
quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-crds-have-resources - labels: - suite: olm - test: olm-crds-have-resources-test - description: CRDs have resources -- name: "olm-spec-descriptors" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-spec-descriptors - labels: - suite: olm - test: olm-spec-descriptors-test - description: OLM Spec Descriptors -- name: "olm-status-descriptors" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-status-descriptors - labels: - suite: olm - test: olm-status-descriptors-test - description: OLM Status Descriptors +stages: +- parallel: true + tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-crds-have-validation + labels: + suite: olm + test: olm-crds-have-validation-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-crds-have-resources + labels: + suite: olm + test: olm-crds-have-resources-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-spec-descriptors + labels: + suite: olm + test: olm-spec-descriptors-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-status-descriptors + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/internal/scorecard/alpha/testpod.go b/internal/scorecard/alpha/testpod.go index 89e93d2c17..c1abc96315 100644 --- a/internal/scorecard/alpha/testpod.go +++ b/internal/scorecard/alpha/testpod.go @@ -32,6 +32,7 @@ const 
( // PodLabelsDir is the name of the directory containing bundle labels. PodLabelsDirName = "labels" + // PodLabelsDir is the directory containing an annotations.yaml file that is // the source of truth for bundle metadata. These labels come from the // bundle image if applicable. @@ -127,19 +128,18 @@ func getPodDefinition(configMapName string, test Test, r PodTestRunner) *v1.Pod } // getPodLog fetches the test results which are found in the pod log -func getPodLog(ctx context.Context, client kubernetes.Interface, pod *v1.Pod) (logOutput []byte, err error) { - +func getPodLog(ctx context.Context, client kubernetes.Interface, pod *v1.Pod) ([]byte, error) { req := client.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}) podLogs, err := req.Stream(ctx) if err != nil { - return logOutput, err + return nil, err } defer podLogs.Close() buf := new(bytes.Buffer) _, err = io.Copy(buf, podLogs) if err != nil { - return logOutput, err + return nil, err } return buf.Bytes(), err } @@ -154,5 +154,4 @@ func (r PodTestRunner) deletePods(ctx context.Context, configMapName string) err return fmt.Errorf("error deleting pods selector %s %w", selector, err) } return nil - } diff --git a/pkg/ansible/flags/flag.go b/pkg/ansible/flags/flag.go index 3bb961ba61..92a57c211e 100644 --- a/pkg/ansible/flags/flag.go +++ b/pkg/ansible/flags/flag.go @@ -15,16 +15,18 @@ package flags import ( - "strings" + "runtime" + "time" - "github.com/operator-framework/operator-sdk/internal/flags/watch" - "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/spf13/pflag" + + "github.com/operator-framework/operator-sdk/pkg/log/zap" ) -// AnsibleOperatorFlags - Options to be used by an ansible operator -type AnsibleOperatorFlags struct { - watch.WatchFlags +// Flags - Options to be used by an ansible operator +type Flags struct { + ReconcilePeriod time.Duration + WatchesFile string InjectOwnerRef bool MaxWorkers int AnsibleVerbosity int @@ -37,45 +39,41 @@ const 
AnsibleCollectionsPathEnvVar = "ANSIBLE_COLLECTIONS_PATH" // AddTo - Add the ansible operator flags to the the flagset // helpTextPrefix will allow you add a prefix to default help text. Joined by a space. -func AddTo(flagSet *pflag.FlagSet, helpTextPrefix ...string) *AnsibleOperatorFlags { - aof := &AnsibleOperatorFlags{} - aof.WatchFlags.AddTo(flagSet, helpTextPrefix...) +func (f *Flags) AddTo(flagSet *pflag.FlagSet, helpTextPrefix ...string) { flagSet.AddFlagSet(zap.FlagSet()) - flagSet.BoolVar(&aof.InjectOwnerRef, + flagSet.DurationVar(&f.ReconcilePeriod, + "reconcile-period", + time.Minute, + "Default reconcile period for controllers", + ) + flagSet.StringVar(&f.WatchesFile, + "watches-file", + "./watches.yaml", + "Path to the watches file to use", + ) + flagSet.BoolVar(&f.InjectOwnerRef, "inject-owner-ref", true, - strings.Join(append(helpTextPrefix, - "The ansible operator will inject owner references unless this flag is false"), " "), + "The ansible operator will inject owner references unless this flag is false", ) - flagSet.IntVar(&aof.MaxWorkers, + flagSet.IntVar(&f.MaxWorkers, "max-workers", - 1, - strings.Join(append(helpTextPrefix, - "Maximum number of workers to use. Overridden by environment variable."), - " "), + runtime.NumCPU(), + "Maximum number of workers to use. Overridden by environment variable.", ) - flagSet.IntVar(&aof.AnsibleVerbosity, + flagSet.IntVar(&f.AnsibleVerbosity, "ansible-verbosity", 2, - strings.Join(append(helpTextPrefix, - "Ansible verbosity. Overridden by environment variable."), - " "), + "Ansible verbosity. Overridden by environment variable.", ) - flagSet.StringVar(&aof.AnsibleRolesPath, + flagSet.StringVar(&f.AnsibleRolesPath, "ansible-roles-path", "", - strings.Join(append(helpTextPrefix, - "Ansible Roles Path. If unset, roles are assumed to be in {{CWD}}/roles."), - " "), + "Ansible Roles Path. 
If unset, roles are assumed to be in {{CWD}}/roles.", ) - flagSet.StringVar(&aof.AnsibleCollectionsPath, + flagSet.StringVar(&f.AnsibleCollectionsPath, "ansible-collections-path", "", - strings.Join(append(helpTextPrefix, - `Path to installed Ansible Collections. If set, collections should be - located in {{value}}/ansible_collections/. If unset, collections are - assumed to be in ~/.ansible/collections or - /usr/share/ansible/collections.`), " "), + "Path to installed Ansible Collections. If set, collections should be located in {{value}}/ansible_collections/. If unset, collections are assumed to be in ~/.ansible/collections or /usr/share/ansible/collections.", ) - return aof } diff --git a/pkg/ansible/run.go b/pkg/ansible/run.go deleted file mode 100644 index 3bbc5ee24e..0000000000 --- a/pkg/ansible/run.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2019 The Operator-SDK Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ansible - -import ( - "context" - "errors" - "fmt" - "os" - "runtime" - "strconv" - "strings" - - "github.com/operator-framework/operator-sdk/pkg/ansible/controller" - aoflags "github.com/operator-framework/operator-sdk/pkg/ansible/flags" - proxy "github.com/operator-framework/operator-sdk/pkg/ansible/proxy" - "github.com/operator-framework/operator-sdk/pkg/ansible/proxy/controllermap" - "github.com/operator-framework/operator-sdk/pkg/ansible/runner" - "github.com/operator-framework/operator-sdk/pkg/ansible/watches" - "github.com/operator-framework/operator-sdk/pkg/k8sutil" - kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" - "github.com/operator-framework/operator-sdk/pkg/leader" - "github.com/operator-framework/operator-sdk/pkg/metrics" - sdkVersion "github.com/operator-framework/operator-sdk/version" - "sigs.k8s.io/controller-runtime/pkg/healthz" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" -) - -var ( - metricsHost = "0.0.0.0" - log = logf.Log.WithName("cmd") - metricsPort int32 = 8383 - operatorMetricsPort int32 = 8686 - healthProbePort int32 = 6789 -) - -func printVersion() { - log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) - log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) - log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) -} - -// Run will start the ansible operator and proxy, blocking until one of them -// returns. 
-func Run(flags *aoflags.AnsibleOperatorFlags) error { - printVersion() - - cfg, err := config.GetConfig() - if err != nil { - log.Error(err, "Failed to get config.") - return err - } - - // Set default manager options - // TODO: probably should expose the host & port as an environment variables - options := manager.Options{ - HealthProbeBindAddress: fmt.Sprintf("%s:%d", metricsHost, healthProbePort), - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - NewClient: func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { - c, err := client.New(config, options) - if err != nil { - return nil, err - } - return &client.DelegatingClient{ - Reader: cache, - Writer: c, - StatusClient: c, - }, nil - }, - } - - namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) - log = log.WithValues("Namespace", namespace) - if found { - if namespace == metav1.NamespaceAll { - log.Info("Watching all namespaces.") - options.Namespace = metav1.NamespaceAll - } else { - if strings.Contains(namespace, ",") { - log.Info("Watching multiple namespaces.") - options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) - } else { - log.Info("Watching single namespace.") - options.Namespace = namespace - } - } - } else { - log.Info(fmt.Sprintf("%v environment variable not set. 
Watching all namespaces.", - k8sutil.WatchNamespaceEnvVar)) - options.Namespace = metav1.NamespaceAll - } - - // Create a new manager to provide shared dependencies and start components - mgr, err := manager.New(cfg, options) - if err != nil { - log.Error(err, "Failed to create a new manager.") - return err - } - - var gvks []schema.GroupVersionKind - cMap := controllermap.NewControllerMap() - watches, err := watches.Load(flags.WatchesFile, flags.MaxWorkers, flags.AnsibleVerbosity) - if err != nil { - log.Error(err, "Failed to load watches.") - return err - } - for _, w := range watches { - runner, err := runner.New(w) - if err != nil { - log.Error(err, "Failed to create runner") - return err - } - - ctr := controller.Add(mgr, controller.Options{ - GVK: w.GroupVersionKind, - Runner: runner, - ManageStatus: w.ManageStatus, - AnsibleDebugLogs: getAnsibleDebugLog(), - MaxWorkers: w.MaxWorkers, - ReconcilePeriod: w.ReconcilePeriod, - Selector: w.Selector, - }) - if ctr == nil { - return fmt.Errorf("failed to add controller for GVK %v", w.GroupVersionKind.String()) - } - - cMap.Store(w.GroupVersionKind, &controllermap.Contents{Controller: *ctr, - WatchDependentResources: w.WatchDependentResources, - WatchClusterScopedResources: w.WatchClusterScopedResources, - OwnerWatchMap: controllermap.NewWatchMap(), - AnnotationWatchMap: controllermap.NewWatchMap(), - }, w.Blacklist) - gvks = append(gvks, w.GroupVersionKind) - } - - operatorName, err := k8sutil.GetOperatorName() - if err != nil { - log.Error(err, "Failed to get the operator name") - return err - } - - // Become the leader before proceeding - err = leader.Become(context.TODO(), operatorName+"-lock") - if err != nil { - log.Error(err, "Failed to become leader.") - return err - } - - addMetrics(context.TODO(), cfg, gvks) - err = mgr.AddHealthzCheck("ping", healthz.Ping) - if err != nil { - log.Error(err, "Failed to add Healthz check.") - } - - done := make(chan error) - - // start the proxy - err = proxy.Run(done, 
proxy.Options{ - Address: "localhost", - Port: 8888, - KubeConfig: mgr.GetConfig(), - Cache: mgr.GetCache(), - RESTMapper: mgr.GetRESTMapper(), - ControllerMap: cMap, - OwnerInjection: flags.InjectOwnerRef, - WatchedNamespaces: []string{namespace}, - }) - if err != nil { - log.Error(err, "Error starting proxy.") - return err - } - - // start the operator - go func() { - done <- mgr.Start(signals.SetupSignalHandler()) - }() - - // wait for either to finish - err = <-done - if err != nil { - log.Error(err, "Proxy or operator exited with error.") - os.Exit(1) - } - log.Info("Exiting.") - return nil -} - -// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using -// the Prometheus operator -func addMetrics(ctx context.Context, cfg *rest.Config, gvks []schema.GroupVersionKind) { - // Get the namespace the operator is currently deployed in. - operatorNs, err := k8sutil.GetOperatorNamespace() - if err != nil { - if errors.Is(err, k8sutil.ErrRunLocal) { - log.Info("Skipping CR metrics server creation; not running in a cluster.") - return - } - } - - if err := serveCRMetrics(cfg, operatorNs, gvks); err != nil { - log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) - } - - // Add to the below struct any other metrics ports you want to expose. - servicePorts := []v1.ServicePort{ - {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, - {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, - } - - // Create Service object to expose the metrics port(s). 
- service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) - if err != nil { - log.Info("Could not create metrics Service", "error", err.Error()) - return - } - - // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources - // necessary to configure Prometheus to scrape metrics from this operator. - services := []*v1.Service{service} - - // The ServiceMonitor is created in the same namespace where the operator is deployed - _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) - if err != nil { - log.Info("Could not create ServiceMonitor object", "error", err.Error()) - // If this operator is deployed to a cluster without the prometheus-operator running, it will return - // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. - if err == metrics.ErrServiceMonitorNotPresent { - log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) - } - } -} - -// serveCRMetrics takes GVKs retrieved from watches and generates metrics based on those types. -// It serves those metrics on "http://metricsHost:operatorMetricsPort". -func serveCRMetrics(cfg *rest.Config, operatorNs string, gvks []schema.GroupVersionKind) error { - // The metrics will be generated from the namespaces which are returned here. - // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. - ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) - if err != nil { - return err - } - - // Generate and serve custom resource specific metrics. 
- err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, gvks, metricsHost, operatorMetricsPort) - if err != nil { - return err - } - return nil -} - -// getAnsibleDebugLog return the value from the ANSIBLE_DEBUG_LOGS it order to -// print the full Ansible logs -func getAnsibleDebugLog() bool { - const envVar = "ANSIBLE_DEBUG_LOGS" - val := false - if envVal, ok := os.LookupEnv(envVar); ok { - if i, err := strconv.ParseBool(envVal); err != nil { - log.Info("Could not parse environment variable as an boolean; using default value", - "envVar", envVar, "default", val) - } else { - val = i - } - } else if !ok { - log.Info("Environment variable not set; using default value", "envVar", envVar, - envVar, val) - } - return val -} diff --git a/pkg/ansible/watches/watches.go b/pkg/ansible/watches/watches.go index a7bd1d079e..61c20d5546 100644 --- a/pkg/ansible/watches/watches.go +++ b/pkg/ansible/watches/watches.go @@ -32,6 +32,7 @@ import ( yaml "sigs.k8s.io/yaml" "github.com/operator-framework/operator-sdk/internal/util/projutil" + "github.com/operator-framework/operator-sdk/pkg/ansible/flags" ) var log = logf.Log.WithName("watches") @@ -230,7 +231,7 @@ func getPossibleRolePaths(path string) []string { fqcn := strings.Split(path, ".") // If fqcn is a valid fully qualified collection name, it is .. if len(fqcn) == 3 { - ansibleCollectionsPathEnv, ok := os.LookupEnv("ANSIBLE_COLLECTIONS_PATH") + ansibleCollectionsPathEnv, ok := os.LookupEnv(flags.AnsibleCollectionsPathEnvVar) if !ok || len(ansibleCollectionsPathEnv) == 0 { ansibleCollectionsPathEnv = "/usr/share/ansible/collections" home, err := os.UserHomeDir() @@ -246,7 +247,7 @@ func getPossibleRolePaths(path string) []string { } // Check for the role where Ansible would. If it exists, use it. 
- ansibleRolesPathEnv, ok := os.LookupEnv("ANSIBLE_ROLES_PATH") + ansibleRolesPathEnv, ok := os.LookupEnv(flags.AnsibleRolesPathEnvVar) if ok && len(ansibleRolesPathEnv) > 0 { for _, possiblePathParent := range strings.Split(ansibleRolesPathEnv, ":") { // "roles" is optionally a part of the path. Check with, and without. diff --git a/pkg/apis/scorecard/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/scorecard/v1alpha2/zz_generated.deepcopy.go index 7e864c1730..a181df4ee3 100644 --- a/pkg/apis/scorecard/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/scorecard/v1alpha2/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ // +build !ignore_autogenerated -// Copyright 2019 The Operator-SDK Authors +// Copyright 2020 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1alpha2 @@ -34,7 +34,6 @@ func (in *ScorecardOutput) DeepCopyInto(out *ScorecardOutput) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardOutput. @@ -58,23 +57,23 @@ func (in *ScorecardOutput) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ScorecardTestResult) DeepCopyInto(out *ScorecardTestResult) { *out = *in - if in.Suggestions != nil { - in, out := &in.Suggestions, &out.Suggestions - *out = make([]string, len(*in)) - copy(*out, *in) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } if in.Errors != nil { in, out := &in.Errors, &out.Errors *out = make([]string, len(*in)) copy(*out, *in) } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, value := range in.Labels { - out.Labels[key] = value - } + if in.Suggestions != nil { + in, out := &in.Suggestions, &out.Suggestions + *out = make([]string, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScorecardTestResult. diff --git a/pkg/apis/scorecard/v1alpha3/formatter.go b/pkg/apis/scorecard/v1alpha3/formatter.go index 074ad06a84..db6d430787 100644 --- a/pkg/apis/scorecard/v1alpha3/formatter.go +++ b/pkg/apis/scorecard/v1alpha3/formatter.go @@ -30,12 +30,12 @@ const ( noColor = "%s\n" ) -func (s Test) MarshalText() (string, error) { +func (s Test) MarshalText() string { var sb strings.Builder - failColor := ": \033[1;" + redColor + "m%s\033[0m\n" - passColor := ": \033[1;" + greenColor + "m%s\033[0m\n" - warnColor := ": \033[1;" + yellowColor + "m%s\033[0m\n" + failColor := "\033[1;" + redColor + "m%s\033[0m" + passColor := "\033[1;" + greenColor + "m%s\033[0m" + warnColor := "\033[1;" + yellowColor + "m%s\033[0m" // turn off colorization if not in a terminal if !isatty.IsTerminal(os.Stdout.Fd()) && @@ -45,47 +45,60 @@ func (s Test) MarshalText() (string, error) { warnColor = noColor } + sb.WriteString(fmt.Sprintf("%s\n", strings.Repeat("-", 80))) + sb.WriteString(fmt.Sprintf("Image: %s\n", s.Spec.Image)) + + if len(s.Spec.Entrypoint) > 0 { + sb.WriteString(fmt.Sprintf("Entrypoint: %s\n", s.Spec.Entrypoint)) + } + if len(s.Spec.Labels) 
> 0 { - sb.WriteString("\tLabels: \n") + sb.WriteString("Labels:\n") for labelKey, labelValue := range s.Spec.Labels { - sb.WriteString(fmt.Sprintf("\t\t%q:%q\n", labelKey, labelValue)) + sb.WriteString(fmt.Sprintf("\t%q:%q\n", labelKey, labelValue)) } } - for _, result := range s.Status.Results { - sb.WriteString(fmt.Sprintf("\t%-35s ", result.Name)) - if result.State == PassState { - sb.WriteString(fmt.Sprintf(passColor, PassState)) - } else if result.State == FailState { - sb.WriteString(fmt.Sprintf(failColor, FailState)) - } else if result.State == ErrorState { - sb.WriteString(fmt.Sprintf(failColor, ErrorState)) - } else { + if len(s.Status.Results) > 0 { + sb.WriteString("Results:\n") + for _, result := range s.Status.Results { + if len(result.Name) > 0 { + sb.WriteString(fmt.Sprintf("\tName: %s\n", result.Name)) + } + sb.WriteString("\tState: ") + if result.State == PassState { + sb.WriteString(fmt.Sprintf(passColor, PassState)) + } else if result.State == FailState { + sb.WriteString(fmt.Sprintf(failColor, FailState)) + } else if result.State == ErrorState { + sb.WriteString(fmt.Sprintf(failColor, ErrorState)) + } else { + sb.WriteString("unknown") + } sb.WriteString("\n") - } - if len(result.Suggestions) > 0 { - sb.WriteString(fmt.Sprintf(warnColor, "Suggestions:")) - } - for _, suggestion := range result.Suggestions { - sb.WriteString(fmt.Sprintf("\t\t%s\n", suggestion)) - } + if len(result.Suggestions) > 0 { + sb.WriteString(fmt.Sprintf(warnColor, "\tSuggestions:\n")) + for _, suggestion := range result.Suggestions { + sb.WriteString(fmt.Sprintf("\t\t%s\n", suggestion)) + } + } - if len(result.Errors) > 0 { - sb.WriteString(fmt.Sprintf(failColor, "Errors:")) + if len(result.Errors) > 0 { + sb.WriteString(fmt.Sprintf(failColor, "\tErrors:\n")) + for _, err := range result.Errors { + sb.WriteString(fmt.Sprintf("\t\t%s\n", err)) + } + } - } - for _, err := range result.Errors { - sb.WriteString(fmt.Sprintf("\t\t%s\n", err)) - } - if result.Log != "" { - 
sb.WriteString("\tLog:\n") - scanner := bufio.NewScanner(strings.NewReader(result.Log)) - for scanner.Scan() { - sb.WriteString(fmt.Sprintf("\t\t%s\n", scanner.Text())) + if result.Log != "" { + sb.WriteString("\tLog:\n") + scanner := bufio.NewScanner(strings.NewReader(result.Log)) + for scanner.Scan() { + sb.WriteString(fmt.Sprintf("\t\t%s\n", scanner.Text())) + } } + sb.WriteString("\n") } - sb.WriteString("\n") } - - return sb.String(), nil + return sb.String() } diff --git a/pkg/apis/scorecard/v1alpha3/types.go b/pkg/apis/scorecard/v1alpha3/types.go index 6c5b6e19af..2096792e86 100644 --- a/pkg/apis/scorecard/v1alpha3/types.go +++ b/pkg/apis/scorecard/v1alpha3/types.go @@ -34,8 +34,8 @@ const ( type TestSpec struct { // Image is the name of the testimage Image string `json:"image"` - // EntryPoint is list of commands and arguments passed to the test image - EntryPoint []string `json:"entrypoint,omitempty"` + // Entrypoint is list of commands and arguments passed to the test image + Entrypoint []string `json:"entrypoint,omitempty"` // Labels that further describe the test and enable selection Labels map[string]string `json:"labels,omitempty"` } @@ -43,7 +43,7 @@ type TestSpec struct { // TestResult contains the results of an individual scorecard test type TestResult struct { // Name is the name of the test - Name string `json:"name"` + Name string `json:"name,omitempty"` // Log holds a log produced from the test (if applicable) Log string `json:"log,omitempty"` // State is the final state of the test @@ -56,23 +56,38 @@ type TestResult struct { // TestStatus contains collection of testResults. type TestStatus struct { - Results []TestResult `json:"results"` + Results []TestResult `json:"results,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Test is the schema for the scorecard API +// Test specifies a single test run. 
type Test struct { metav1.TypeMeta `json:",inline"` Spec TestSpec `json:"spec,omitempty"` Status TestStatus `json:"status,omitempty"` } -func NewTest() *Test { - return &Test{ +// TestList is a list of tests. +type TestList struct { + metav1.TypeMeta `json:",inline"` + Items []Test `json:"items"` +} + +func NewTest() Test { + return Test{ TypeMeta: metav1.TypeMeta{ + APIVersion: SchemeGroupVersion.String(), Kind: "Test", + }, + } +} + +func NewTestList() TestList { + return TestList{ + TypeMeta: metav1.TypeMeta{ APIVersion: SchemeGroupVersion.String(), + Kind: "TestList", }, } } diff --git a/pkg/apis/scorecard/v1alpha3/zz_generated.deepcopy.go b/pkg/apis/scorecard/v1alpha3/zz_generated.deepcopy.go index bcddf16179..0ba9be02e9 100644 --- a/pkg/apis/scorecard/v1alpha3/zz_generated.deepcopy.go +++ b/pkg/apis/scorecard/v1alpha3/zz_generated.deepcopy.go @@ -1,20 +1,18 @@ // +build !ignore_autogenerated -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2020 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. // Code generated by controller-gen. DO NOT EDIT. @@ -50,6 +48,29 @@ func (in *Test) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestList) DeepCopyInto(out *TestList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Test, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestList. +func (in *TestList) DeepCopy() *TestList { + if in == nil { + return nil + } + out := new(TestList) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TestResult) DeepCopyInto(out *TestResult) { *out = *in @@ -78,8 +99,8 @@ func (in *TestResult) DeepCopy() *TestResult { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TestSpec) DeepCopyInto(out *TestSpec) { *out = *in - if in.EntryPoint != nil { - in, out := &in.EntryPoint, &out.EntryPoint + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint *out = make([]string, len(*in)) copy(*out, *in) } diff --git a/pkg/helm/flags/flag.go b/pkg/helm/flags/flag.go index 87b137457d..b19fef2825 100644 --- a/pkg/helm/flags/flag.go +++ b/pkg/helm/flags/flag.go @@ -15,31 +15,37 @@ package flags import ( - "strings" + "runtime" + "time" - "github.com/operator-framework/operator-sdk/internal/flags/watch" - "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/spf13/pflag" + + "github.com/operator-framework/operator-sdk/pkg/log/zap" ) -// HelmOperatorFlags - Options to be used by a helm operator -type HelmOperatorFlags struct { - watch.WatchFlags - MaxWorkers int +// Flags - Options to be used by a helm operator +type Flags struct { + ReconcilePeriod time.Duration + WatchesFile string + MaxWorkers int } // AddTo - Add the helm operator flags to the the flagset -// helpTextPrefix will allow you add a prefix to default help text. Joined by a space. -func AddTo(flagSet *pflag.FlagSet, helpTextPrefix ...string) *HelmOperatorFlags { - hof := &HelmOperatorFlags{} - hof.WatchFlags.AddTo(flagSet, helpTextPrefix...) 
+func (f *Flags) AddTo(flagSet *pflag.FlagSet) { flagSet.AddFlagSet(zap.FlagSet()) - flagSet.IntVar(&hof.MaxWorkers, + flagSet.DurationVar(&f.ReconcilePeriod, + "reconcile-period", + time.Minute, + "Default reconcile period for controllers", + ) + flagSet.StringVar(&f.WatchesFile, + "watches-file", + "./watches.yaml", + "Path to the watches file to use", + ) + flagSet.IntVar(&f.MaxWorkers, "max-workers", - 1, - strings.Join(append(helpTextPrefix, - "Maximum number of workers to use."), - " "), + runtime.NumCPU(), + "Maximum number of workers to use", ) - return hof } diff --git a/pkg/helm/run.go b/pkg/helm/run.go deleted file mode 100644 index 949ef6d043..0000000000 --- a/pkg/helm/run.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2019 The Operator-SDK Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package helm - -import ( - "context" - "errors" - "fmt" - "os" - "runtime" - "strings" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - - "github.com/operator-framework/operator-sdk/pkg/helm/controller" - hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags" - "github.com/operator-framework/operator-sdk/pkg/helm/release" - "github.com/operator-framework/operator-sdk/pkg/helm/watches" - "github.com/operator-framework/operator-sdk/pkg/k8sutil" - kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" - "github.com/operator-framework/operator-sdk/pkg/leader" - "github.com/operator-framework/operator-sdk/pkg/metrics" - sdkVersion "github.com/operator-framework/operator-sdk/version" -) - -var ( - metricsHost = "0.0.0.0" - metricsPort int32 = 8383 - operatorMetricsPort int32 = 8686 -) - -var log = logf.Log.WithName("cmd") - -func printVersion() { - log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) - log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) - log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) -} - -// Run runs the helm operator -func Run(flags *hoflags.HelmOperatorFlags) error { - printVersion() - - cfg, err := config.GetConfig() - if err != nil { - log.Error(err, "Failed to get config.") - return err - } - - // Set default manager options - options := manager.Options{ - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - NewClient: func(cache cache.Cache, config *rest.Config, options crclient.Options) (crclient.Client, 
error) { - c, err := crclient.New(config, options) - if err != nil { - return nil, err - } - return &crclient.DelegatingClient{ - Reader: cache, - Writer: c, - StatusClient: c, - }, nil - }, - } - - namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar) - log = log.WithValues("Namespace", namespace) - if found { - if namespace == metav1.NamespaceAll { - log.Info("Watching all namespaces.") - options.Namespace = metav1.NamespaceAll - } else { - if strings.Contains(namespace, ",") { - log.Info("Watching multiple namespaces.") - options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) - } else { - log.Info("Watching single namespace.") - options.Namespace = namespace - } - } - } else { - log.Info(fmt.Sprintf("%v environment variable not set. Watching all namespaces.", - k8sutil.WatchNamespaceEnvVar)) - options.Namespace = metav1.NamespaceAll - } - - mgr, err := manager.New(cfg, options) - if err != nil { - log.Error(err, "Failed to create a new manager.") - return err - } - - ws, err := watches.Load(flags.WatchesFile) - if err != nil { - log.Error(err, "Failed to create new manager factories.") - return err - } - var gvks []schema.GroupVersionKind - for _, w := range ws { - // Register the controller with the factory. 
- err := controller.Add(mgr, controller.WatchOptions{ - Namespace: namespace, - GVK: w.GroupVersionKind, - ManagerFactory: release.NewManagerFactory(mgr, w.ChartDir), - ReconcilePeriod: flags.ReconcilePeriod, - WatchDependentResources: *w.WatchDependentResources, - OverrideValues: w.OverrideValues, - MaxWorkers: flags.MaxWorkers, - }) - if err != nil { - log.Error(err, "Failed to add manager factory to controller.") - return err - } - gvks = append(gvks, w.GroupVersionKind) - } - - operatorName, err := k8sutil.GetOperatorName() - if err != nil { - log.Error(err, "Failed to get operator name") - return err - } - - ctx := context.TODO() - - // Become the leader before proceeding - err = leader.Become(ctx, operatorName+"-lock") - if err != nil { - log.Error(err, "Failed to become leader.") - return err - } - - addMetrics(context.TODO(), cfg, gvks) - - // Start the Cmd - if err = mgr.Start(signals.SetupSignalHandler()); err != nil { - log.Error(err, "Manager exited non-zero.") - os.Exit(1) - } - return nil -} - -// addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using -// the Prometheus operator -func addMetrics(ctx context.Context, cfg *rest.Config, gvks []schema.GroupVersionKind) { - // Get the namespace the operator is currently deployed in. - operatorNs, err := k8sutil.GetOperatorNamespace() - if err != nil { - if errors.Is(err, k8sutil.ErrRunLocal) { - log.Info("Skipping CR metrics server creation; not running in a cluster.") - return - } - } - - if err := serveCRMetrics(cfg, operatorNs, gvks); err != nil { - log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) - } - - // Add to the below struct any other metrics ports you want to expose. 
- servicePorts := []v1.ServicePort{ - {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, - {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, - } - - // Create Service object to expose the metrics port(s). - service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) - if err != nil { - log.Info("Could not create metrics Service", "error", err.Error()) - } - - // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources - // necessary to configure Prometheus to scrape metrics from this operator. - services := []*v1.Service{service} - - // The ServiceMonitor is created in the same namespace where the operator is deployed - _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) - if err != nil { - log.Info("Could not create ServiceMonitor object", "error", err.Error()) - // If this operator is deployed to a cluster without the prometheus-operator running, it will return - // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. - if err == metrics.ErrServiceMonitorNotPresent { - log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) - } - } -} - -// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. -// It serves those metrics on "http://metricsHost:operatorMetricsPort". -func serveCRMetrics(cfg *rest.Config, operatorNs string, gvks []schema.GroupVersionKind) error { - // The metrics will be generated from the namespaces which are returned here. - // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. 
- ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) - if err != nil { - return err - } - - // Generate and serve custom resource specific metrics. - err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, gvks, metricsHost, operatorMetricsPort) - if err != nil { - return err - } - return nil -} diff --git a/website/content/en/docs/scorecard/custom-tests.md b/website/content/en/docs/scorecard/custom-tests.md index 40b54253e7..bf02b76183 100644 --- a/website/content/en/docs/scorecard/custom-tests.md +++ b/website/content/en/docs/scorecard/custom-tests.md @@ -86,17 +86,16 @@ func CustomTest1(bundle registry.Bundle) scapiv1alpha2.ScorecardTestResult { The [configuration file][config_yaml] includes test definitions and metadata to run the test. For the example `CustomTest1` function, the following fields should be specified in `config.yaml`. ```yaml -tests: -- name: "customtest1" - image: quay.io/username/custom-scorecard-tests:dev - entrypoint: - - custom-scorecard-tests - - customtest1 - labels: - suite: custom - test: customtest1 - description: an ISV custom test - ``` +stages: +- tests: + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest1 + labels: + suite: custom + test: customtest1 + ``` The important fields to note here are: 1. `image` - name and tag of the test image which was specified in the Makefile. diff --git a/website/content/en/docs/scorecard/kuttl-tests.md b/website/content/en/docs/scorecard/kuttl-tests.md index 207473b3ec..c685b36a7b 100644 --- a/website/content/en/docs/scorecard/kuttl-tests.md +++ b/website/content/en/docs/scorecard/kuttl-tests.md @@ -48,13 +48,12 @@ cases under the scorecard/kuttl directory within the bundle contents. 
In the scorecard configuration file, you might have the following definition of what the selector `suite=kuttlsuite` will translate to: ```yaml -tests: -- name: "kuttltest1" - image: quay.io/operator-framework/scorecard-test-kuttl:dev - labels: - suite: kuttlsuite - test: kuttltest1 - description: an ISV custom test that does... +stages: +- tests: + - image: quay.io/operator-framework/scorecard-test-kuttl:dev + labels: + suite: kuttlsuite + test: kuttltest1 ``` This test configuration will execute the scorecard-test-kuttl diff --git a/website/content/en/docs/scorecard/scorecard-alpha.md b/website/content/en/docs/scorecard/scorecard-alpha.md index 365f7db8c5..70b3964aa8 100644 --- a/website/content/en/docs/scorecard/scorecard-alpha.md +++ b/website/content/en/docs/scorecard/scorecard-alpha.md @@ -13,16 +13,16 @@ Tests are implemented within test images that are configured and constructed to be executed by scorecard. Scorecard assumes it is being executed with access to a configured -Kube cluster. Each test is executed within a Pod by scorecard, +Kubernetes cluster. Each test is executed within a Pod by scorecard, from which pod logs are aggregated and test results sent to the console. -Scorecard has built-in basic and OLM tests but also provides a +Scorecard has built-in basic and OLM tests, and it also provides a means to execute custom test definitions. ## Requirements The scorecard tests make no assumptions as to the state of the -operator being tested. Creating operators and custom resources +operator being tested. Creating operators and custom resources for an operator are left outside the scope of the scorecard itself. Scorecard tests can however create whatever resources they @@ -37,7 +37,7 @@ of the configuration file format. Unless you are executing custom tests, you can just copy the provided example configuration file into your project. 2. 
Place the scorecard configuration file within your project -bundle directory a the following location `tests/scorecard/config.yaml`. +bundle directory at the following location `tests/scorecard/config.yaml`. You can override the default location of the configuration file by specifying the `--config` flag. 3. Execute the [`scorecard` command][cli-scorecard]. See the @@ -60,43 +60,42 @@ and used for running the scorecard pre-defined tests that ship with the SDK. A sample of the scorecard configuration file may look as follows: ```yaml -tests: -- name: "basic-check-spec" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test - description: check the spec test -- name: "olm-bundle-validation" - image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test - description: validate the bundle test +stages: +- parallel: true + tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test ``` -The configuration file defines each test that scorecard can execute. The -following fields of the scorecard configuration file define the test -as follows: +The configuration file defines the tests that scorecard executes. Tests are +grouped into stages for fine-grained control of [parallelism](#parallelism). 
+The following fields of the scorecard configuration file define the test as +follows: -| Config Field | Description | -| -------- | -------- | -| image | the test container image name that implements a test -| entrypoint | the command and arguments that are invoked in the test image to execute a test -| labels | scorecard-defined or custom labels that [select](#selecting-tests) which tests to run +| Config Field | Description +| ------------ | ----------- +| image | the test container image name that implements a test +| entrypoint | the command and arguments that are invoked in the test image to execute a test +| labels | scorecard-defined or custom labels that [select](#selecting-tests) which tests to run ### Command Args The scorecard command has the following syntax: ``` -operator-sdk alpha scorecard [bundle path] | [bundle image name] [flags] +operator-sdk alpha scorecard [flags] ``` The scorecard requires a positional argument that holds either the @@ -104,6 +103,24 @@ on-disk path to your operator bundle or the name of a bundle image. For further information about the flags see the [CLI documentation][cli-scorecard]. +## Parallelism + +The configuration file allows operator developers to define separate stages for +their tests. Stages run sequentially in the order they are defined in the +configuration file. A stage contains a list of tests and a configurable +`parallel` setting. + +By default (or when a stage explicitly sets `parallel` to `false`), tests in +a stage are run sequentially in the order they are defined in the configuration +file. Running tests one at a time is helpful to guarantee that no two tests +interact and conflict with each other. + +However, if tests are designed to be fully isolated, they can be parallelized. +To run a set of isolated tests in parallel, include them in the same stage and +set `parallel` to `true`. 
All tests in a parallel stage are executed
+simultaneously, and scorecard waits for all of them to finish before proceeding
+to the next stage. This can make your tests run much faster.
+
 ## Selecting Tests
 
 Tests are selected by setting the `--selector` CLI flag to
@@ -160,20 +177,34 @@ See an example of the JSON format produced by a scorecard test:
 
 ```json
 {
-  "spec": {
-    "image": ""
-  },
-  "status": {
-    "results": [
-      {
-        "name": "olm-bundle-validation",
-        "log": "time=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found metadata directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level
-=debug msg=\"Getting mediaType info from manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Found annotations file\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Could not find optio
-nal dependencies file\" name=bundle-test\n",
-        "state": "pass"
+  "apiVersion": "scorecard.operatorframework.io/v1alpha3",
+  "kind": "TestList",
+  "items": [
+    {
+      "kind": "Test",
+      "apiVersion": "scorecard.operatorframework.io/v1alpha3",
+      "spec": {
+        "image": "quay.io/operator-framework/scorecard-test:dev",
+        "entrypoint": [
+          "scorecard-test",
+          "olm-bundle-validation"
+        ],
+        "labels": {
+          "suite": "olm",
+          "test": "olm-bundle-validation-test"
+        }
+      },
+      "status": {
+        "results": [
+          {
+            "name": "olm-bundle-validation",
+            "log": "time=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=debug msg=\"Found metadata directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=debug msg=\"Getting mediaType info from manifests directory\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Found annotations file\" name=bundle-test\ntime=\"2020-06-10T19:02:49Z\" level=info msg=\"Could not find optional dependencies file\" name=bundle-test\n",
+            "state": "pass"
+          }
+ 
] } - ] - } + } + ] } ``` @@ -182,17 +213,24 @@ nal dependencies file\" name=bundle-test\n", See an example of the text format produced by a scorecard test: ``` - Labels: - olm-bundle-validation : pass - Log: - time="2020-06-10T19:00:43Z" level=debug msg="Found manifests directory" name=bundle-test - time="2020-06-10T19:00:43Z" level=debug msg="Found metadata directory" name=bundle-test - time="2020-06-10T19:00:43Z" level=debug msg="Getting mediaType info from manifests directory" name=bundle-test - time="2020-06-10T19:00:43Z" level=info msg="Found annotations file" name=bundle-test - time="2020-06-10T19:00:43Z" level=info msg="Could not find optional dependencies file" name=bundle-test +-------------------------------------------------------------------------------- +Image: quay.io/operator-framework/scorecard-test:dev +Entrypoint: [scorecard-test olm-bundle-validation] +Labels: + "suite":"olm" + "test":"olm-bundle-validation-test" +Results: + Name: olm-bundle-validation + State: pass + Log: + time="2020-07-15T03:19:02Z" level=debug msg="Found manifests directory" name=bundle-test + time="2020-07-15T03:19:02Z" level=debug msg="Found metadata directory" name=bundle-test + time="2020-07-15T03:19:02Z" level=debug msg="Getting mediaType info from manifests directory" name=bundle-test + time="2020-07-15T03:19:02Z" level=info msg="Found annotations file" name=bundle-test + time="2020-07-15T03:19:02Z" level=info msg="Could not find optional dependencies file" name=bundle-test ``` -**NOTE** The output format spec matches the [`Test`](https://godoc.org/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3#Test) type layout. +**NOTE** The output format spec for each test matches the [`Test`](https://godoc.org/github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3#Test) type layout. 
## Exit Status @@ -208,7 +246,7 @@ Scorecard will execute custom tests if they follow these mandated conventions: * tests accept an entrypoint which include a command and arguments * tests produce v1alpha3 scorecard output in JSON format with no extraneous logging in the test output * tests can obtain the bundle contents at a shared mount point of /bundle - * tests can access the Kube API using an in-cluster client connection + * tests can access the Kubernetes API using an in-cluster client connection