diff --git a/changelog/fragments/scorecard-parallel.yaml b/changelog/fragments/scorecard-parallel.yaml new file mode 100644 index 0000000000..862e3dd4cb --- /dev/null +++ b/changelog/fragments/scorecard-parallel.yaml @@ -0,0 +1,55 @@ +entries: + - description: > + Changes the scorecard configuration format to include a new top-level + `stages` field, which allows users to define stages of tests and to + enable parallelism within each stage. + + kind: "change" + + # Is this a breaking change? + breaking: true + + # NOTE: ONLY USE `pull_request_override` WHEN ADDING THIS + # FILE FOR A PREVIOUSLY MERGED PULL_REQUEST! + # + # The generator auto-detects the PR number from the commit + # message in which this file was originally added. + # + # What is the pull request number (without the "#")? + # pull_request_override: 0 + + + # Migration can be defined to automatically add a section to + # the migration guide. This is required for breaking changes. + migration: + header: Add stages to scorecard configuration file + body: | + Update your scorecard configuration file to contain a new top-level + stages field, and move your test definitions under one or more stages. 
+ + **Example** + + ```yaml + tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + ``` + + should be converted to + + ```yaml + stages: + - tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + ``` diff --git a/internal/registry/labels.go b/internal/registry/labels.go index ed03778963..b51ec23156 100644 --- a/internal/registry/labels.go +++ b/internal/registry/labels.go @@ -27,7 +27,7 @@ import ( registrybundle "github.com/operator-framework/operator-registry/pkg/lib/bundle" log "github.com/sirupsen/logrus" - // TODO: replace `gopkg.in/yaml.v2` with `sigs.k8s.io/yaml` once operator-registry has `json` tags in the + // TODO: replace `gopkg.in/yaml.v3` with `sigs.k8s.io/yaml` once operator-registry has `json` tags in the // annotations struct. yaml "gopkg.in/yaml.v3" ) diff --git a/internal/scorecard/alpha/config.go b/internal/scorecard/alpha/config.go index f05f8016cd..2eef8a6045 100644 --- a/internal/scorecard/alpha/config.go +++ b/internal/scorecard/alpha/config.go @@ -25,6 +25,11 @@ const ( ConfigDirPath = "/tests/" + ConfigDirName + "/" ) +type Stage struct { + Parallel bool `yaml:"parallel"` + Tests []Test `yaml:"tests"` +} + type Test struct { // Image is the name of the testimage Image string `json:"image"` @@ -37,7 +42,7 @@ type Test struct { // Config represents the set of test configurations which scorecard // would run based on user input type Config struct { - Tests []Test `yaml:"tests"` + Stages []Stage `yaml:"stages"` } // LoadConfig will find and return the scorecard config, the config file diff --git a/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml b/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml index 
8ccedd19e4..103a2ec1b4 100644 --- a/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml +++ b/internal/scorecard/alpha/examples/custom-scorecard-tests/bundle/tests/scorecard/config.yaml @@ -1,15 +1,16 @@ -tests: -- image: quay.io/username/custom-scorecard-tests:dev - entrypoint: - - custom-scorecard-tests - - customtest1 - labels: - suite: custom - test: customtest1 -- image: quay.io/username/custom-scorecard-tests:dev - entrypoint: - - custom-scorecard-tests - - customtest2 - labels: - suite: custom - test: customtest2 +stages: +- tests: + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest1 + labels: + suite: custom + test: customtest1 + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest2 + labels: + suite: custom + test: customtest2 diff --git a/internal/scorecard/alpha/formatting.go b/internal/scorecard/alpha/formatting.go index 15d0fc18f8..389a3698a6 100644 --- a/internal/scorecard/alpha/formatting.go +++ b/internal/scorecard/alpha/formatting.go @@ -42,17 +42,17 @@ func (r PodTestRunner) getTestStatus(ctx context.Context, p *v1.Pod) (output *v1 // run based on user selection func (o Scorecard) List() v1alpha3.TestList { output := v1alpha3.NewTestList() - tests := o.selectTests() - - for _, test := range tests { - item := v1alpha3.NewTest() - item.Spec = v1alpha3.TestSpec{ - Image: test.Image, - Entrypoint: test.Entrypoint, - Labels: test.Labels, + for _, stage := range o.Config.Stages { + tests := o.selectTests(stage) + for _, test := range tests { + item := v1alpha3.NewTest() + item.Spec = v1alpha3.TestSpec{ + Image: test.Image, + Entrypoint: test.Entrypoint, + Labels: test.Labels, + } + output.Items = append(output.Items, item) } - output.Items = append(output.Items, item) } - return output } diff --git a/internal/scorecard/alpha/labels_test.go b/internal/scorecard/alpha/labels_test.go index 
1959f98418..8bd4d2d605 100644 --- a/internal/scorecard/alpha/labels_test.go +++ b/internal/scorecard/alpha/labels_test.go @@ -55,7 +55,7 @@ func TestEmptySelector(t *testing.T) { return } - tests := o.selectTests() + tests := o.selectTests(o.Config.Stages[0]) testsSelected := len(tests) if testsSelected != c.testsSelected { t.Errorf("Wanted testsSelected %d, got: %d", c.testsSelected, testsSelected) @@ -65,51 +65,52 @@ func TestEmptySelector(t *testing.T) { } } -const testConfig = `tests: -- image: quay.io/someuser/customtest1:v0.0.1 - entrypoint: - - custom-test - labels: - suite: custom - test: customtest1 -- image: quay.io/someuser/customtest2:v0.0.1 - entrypoint: - - custom-test - labels: - suite: custom - test: customtest2 -- image: quay.io/redhat/basictests:v0.0.1 - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test -- image: quay.io/redhat/basictests:v0.0.1 - entrypoint: - - scorecard-test - - basic-check-status - labels: - suite: basic - test: basic-check-status-test -- image: quay.io/redhat/olmtests:v0.0.1 - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test -- image: quay.io/redhat/olmtests:v0.0.1 - entrypoint: - - scorecard-test - - olm-crds-have-validation - labels: - suite: olm - test: olm-crds-have-validation-test -- image: quay.io/redhat/kuttltests:v0.0.1 - labels: - suite: kuttl - entrypoint: - - kuttl-test - - olm-status-descriptors - ` +const testConfig = `stages: +- tests: + - image: quay.io/someuser/customtest1:v0.0.1 + entrypoint: + - custom-test + labels: + suite: custom + test: customtest1 + - image: quay.io/someuser/customtest2:v0.0.1 + entrypoint: + - custom-test + labels: + suite: custom + test: customtest2 + - image: quay.io/redhat/basictests:v0.0.1 + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/redhat/basictests:v0.0.1 + entrypoint: + - 
scorecard-test + - basic-check-status + labels: + suite: basic + test: basic-check-status-test + - image: quay.io/redhat/olmtests:v0.0.1 + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test + - image: quay.io/redhat/olmtests:v0.0.1 + entrypoint: + - scorecard-test + - olm-crds-have-validation + labels: + suite: olm + test: olm-crds-have-validation-test + - image: quay.io/redhat/kuttltests:v0.0.1 + labels: + suite: kuttl + entrypoint: + - kuttl-test + - olm-status-descriptors +` diff --git a/internal/scorecard/alpha/run_test.go b/internal/scorecard/alpha/run_test.go index f18eaa4c43..84a5d8032a 100644 --- a/internal/scorecard/alpha/run_test.go +++ b/internal/scorecard/alpha/run_test.go @@ -16,6 +16,7 @@ package alpha import ( "context" + "errors" "path/filepath" "testing" "time" @@ -25,8 +26,8 @@ import ( "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha3" ) +// TODO(joelanford): rewrite to use ginkgo/gomega func TestRunTests(t *testing.T) { - cases := []struct { name string configPathValue string @@ -100,3 +101,92 @@ func TestRunTests(t *testing.T) { } } + +// TODO(joelanford): rewrite to use ginkgo/gomega +func TestRunParallelPass(t *testing.T) { + scorecard := getFakeScorecard(true) + ctx, cancel := context.WithTimeout(context.Background(), 7*time.Millisecond) + defer cancel() + + tests, err := scorecard.Run(ctx) + if err != nil { + t.Fatalf("Expected no error, got error: %v", err) + } + if len(tests.Items) != 2 { + t.Fatalf("Expected 2 tests, got %d", len(tests.Items)) + } + for _, test := range tests.Items { + expectPass(t, test) + } +} + +// TODO(joelanford): rewrite to use ginkgo/gomega +func TestRunSequentialPass(t *testing.T) { + scorecard := getFakeScorecard(false) + ctx, cancel := context.WithTimeout(context.Background(), 12*time.Millisecond) + defer cancel() + + tests, err := scorecard.Run(ctx) + if err != nil { + t.Fatalf("Expected no error, got error: %v", err) + } + 
if len(tests.Items) != 2 { + t.Fatalf("Expected 2 tests, got %d", len(tests.Items)) + } + for _, test := range tests.Items { + expectPass(t, test) + } +} + +// TODO(joelanford): rewrite to use ginkgo/gomega +func TestRunSequentialFail(t *testing.T) { + scorecard := getFakeScorecard(false) + + ctx, cancel := context.WithTimeout(context.Background(), 7*time.Millisecond) + defer cancel() + + _, err := scorecard.Run(ctx) + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("Expected deadline exceeded error, got: %v", err) + } +} + +func getFakeScorecard(parallel bool) Scorecard { + return Scorecard{ + Config: Config{ + Stages: []Stage{ + { + Parallel: parallel, + Tests: []Test{ + {}, + {}, + }, + }, + }, + }, + TestRunner: FakeTestRunner{ + Sleep: 5 * time.Millisecond, + TestStatus: &v1alpha3.TestStatus{ + Results: []v1alpha3.TestResult{ + { + State: v1alpha3.PassState, + }, + }, + }, + }, + } +} + +func expectPass(t *testing.T, test v1alpha3.Test) { + if len(test.Status.Results) != 1 { + t.Fatalf("Expected 1 results, got %d", len(test.Status.Results)) + } + for _, r := range test.Status.Results { + if len(r.Errors) > 0 { + t.Fatalf("Expected no errors, got %v", r.Errors) + } + if r.State != v1alpha3.PassState { + t.Fatalf("Expected result state %q, got %q", v1alpha3.PassState, r.State) + } + } +} diff --git a/internal/scorecard/alpha/scorecard.go b/internal/scorecard/alpha/scorecard.go index 03fbb4ff96..623e35bf78 100644 --- a/internal/scorecard/alpha/scorecard.go +++ b/internal/scorecard/alpha/scorecard.go @@ -17,6 +17,7 @@ package alpha import ( "context" "fmt" + "sync" "time" v1 "k8s.io/api/core/v1" @@ -53,6 +54,7 @@ type PodTestRunner struct { } type FakeTestRunner struct { + Sleep time.Duration TestStatus *v1alpha3.TestStatus Error error } @@ -65,25 +67,22 @@ func (o Scorecard) Run(ctx context.Context) (v1alpha3.TestList, error) { return testOutput, err } - tests := o.selectTests() - if len(tests) == 0 { - return testOutput, nil - } - - for _, test := 
range tests { - result, err := o.TestRunner.RunTest(ctx, test) - if err != nil { - result = convertErrorToStatus(err, "") + for _, stage := range o.Config.Stages { + tests := o.selectTests(stage) + if len(tests) == 0 { + continue } - out := v1alpha3.NewTest() - out.Spec = v1alpha3.TestSpec{ - Image: test.Image, - Entrypoint: test.Entrypoint, - Labels: test.Labels, + output := make(chan v1alpha3.Test, len(tests)) + if stage.Parallel { + o.runStageParallel(ctx, tests, output) + } else { + o.runStageSequential(ctx, tests, output) + } + close(output) + for o := range output { + testOutput.Items = append(testOutput.Items, o) } - out.Status = *result - testOutput.Items = append(testOutput.Items, out) } if !o.SkipCleanup { @@ -94,14 +93,46 @@ func (o Scorecard) Run(ctx context.Context) (v1alpha3.TestList, error) { return testOutput, nil } +func (o Scorecard) runStageParallel(ctx context.Context, tests []Test, results chan<- v1alpha3.Test) { + var wg sync.WaitGroup + for _, t := range tests { + wg.Add(1) + go func(test Test) { + results <- o.runTest(ctx, test) + wg.Done() + }(t) + } + wg.Wait() +} + +func (o Scorecard) runStageSequential(ctx context.Context, tests []Test, results chan<- v1alpha3.Test) { + for _, test := range tests { + results <- o.runTest(ctx, test) + } +} + +func (o Scorecard) runTest(ctx context.Context, test Test) v1alpha3.Test { + result, err := o.TestRunner.RunTest(ctx, test) + if err != nil { + result = convertErrorToStatus(err, "") + } + + out := v1alpha3.NewTest() + out.Spec = v1alpha3.TestSpec{ + Image: test.Image, + Entrypoint: test.Entrypoint, + Labels: test.Labels, + } + out.Status = *result + return out +} + // selectTests applies an optionally passed selector expression // against the configured set of tests, returning the selected tests -func (o Scorecard) selectTests() []Test { - +func (o *Scorecard) selectTests(stage Stage) []Test { selected := make([]Test, 0) - - for _, test := range o.Config.Tests { - if o.Selector.String() == "" || 
o.Selector.Matches(labels.Set(test.Labels)) { + for _, test := range stage.Tests { + if o.Selector == nil || o.Selector.String() == "" || o.Selector.Matches(labels.Set(test.Labels)) { // TODO olm manifests check selected = append(selected, test) } @@ -175,10 +206,10 @@ func (r PodTestRunner) RunTest(ctx context.Context, test Test) (*v1alpha3.TestSt // RunTest executes a single test func (r FakeTestRunner) RunTest(ctx context.Context, test Test) (result *v1alpha3.TestStatus, err error) { select { + case <-time.After(r.Sleep): + return r.TestStatus, r.Error case <-ctx.Done(): return nil, ctx.Err() - default: - return r.TestStatus, r.Error } } diff --git a/internal/scorecard/alpha/testdata/bundle.tar.gz b/internal/scorecard/alpha/testdata/bundle.tar.gz index 5e0933ba70..6ae010d440 100644 Binary files a/internal/scorecard/alpha/testdata/bundle.tar.gz and b/internal/scorecard/alpha/testdata/bundle.tar.gz differ diff --git a/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml b/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml index 265eac4cb7..4831301bbc 100644 --- a/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml +++ b/internal/scorecard/alpha/testdata/bundle/tests/scorecard/config.yaml @@ -1,43 +1,45 @@ -tests: -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-crds-have-validation - labels: - suite: olm - test: olm-crds-have-validation-test -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-crds-have-resources - labels: - suite: olm - test: olm-crds-have-resources-test -- image: 
quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-spec-descriptors - labels: - suite: olm - test: olm-spec-descriptors-test -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-status-descriptors - labels: - suite: olm - test: olm-status-descriptors-test +stages: +- parallel: true + tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-crds-have-validation + labels: + suite: olm + test: olm-crds-have-validation-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-crds-have-resources + labels: + suite: olm + test: olm-crds-have-resources-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-spec-descriptors + labels: + suite: olm + test: olm-spec-descriptors-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-status-descriptors + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/website/content/en/docs/scorecard/custom-tests.md b/website/content/en/docs/scorecard/custom-tests.md index 4552341a65..bf02b76183 100644 --- a/website/content/en/docs/scorecard/custom-tests.md +++ b/website/content/en/docs/scorecard/custom-tests.md @@ -86,15 +86,16 @@ func CustomTest1(bundle registry.Bundle) scapiv1alpha2.ScorecardTestResult { The [configuration file][config_yaml] includes test definitions and metadata to run the test. For the example `CustomTest1` function, the following fields should be specified in `config.yaml`. 
```yaml -tests: -- image: quay.io/username/custom-scorecard-tests:dev - entrypoint: - - custom-scorecard-tests - - customtest1 - labels: - suite: custom - test: customtest1 -``` +stages: +- tests: + - image: quay.io/username/custom-scorecard-tests:dev + entrypoint: + - custom-scorecard-tests + - customtest1 + labels: + suite: custom + test: customtest1 +``` The important fields to note here are: 1. `image` - name and tag of the test image which was specified in the Makefile. diff --git a/website/content/en/docs/scorecard/kuttl-tests.md b/website/content/en/docs/scorecard/kuttl-tests.md index 91f552b863..c685b36a7b 100644 --- a/website/content/en/docs/scorecard/kuttl-tests.md +++ b/website/content/en/docs/scorecard/kuttl-tests.md @@ -48,11 +48,12 @@ cases under the scorecard/kuttl directory within the bundle contents. In the scorecard configuration file, you might have the following definition of what the selector `suite=kuttlsuite` will translate to: ```yaml -tests: -- image: quay.io/operator-framework/scorecard-test-kuttl:dev - labels: - suite: kuttlsuite - test: kuttltest1 +stages: +- tests: + - image: quay.io/operator-framework/scorecard-test-kuttl:dev + labels: + suite: kuttlsuite + test: kuttltest1 ``` This test configuration will execute the scorecard-test-kuttl diff --git a/website/content/en/docs/scorecard/scorecard-alpha.md b/website/content/en/docs/scorecard/scorecard-alpha.md index 550c817d4f..70b3964aa8 100644 --- a/website/content/en/docs/scorecard/scorecard-alpha.md +++ b/website/content/en/docs/scorecard/scorecard-alpha.md @@ -13,16 +13,16 @@ Tests are implemented within test images that are configured and constructed to be executed by scorecard. Scorecard assumes it is being executed with access to a configured -Kube cluster. Each test is executed within a Pod by scorecard, +Kubernetes cluster. Each test is executed within a Pod by scorecard, from which pod logs are aggregated and test results sent to the console. 
-Scorecard has built-in basic and OLM tests but also provides a +Scorecard has built-in basic and OLM tests, and it also provides a means to execute custom test definitions. ## Requirements The scorecard tests make no assumptions as to the state of the -operator being tested. Creating operators and custom resources +operator being tested. Creating operators and custom resources for an operator are left outside the scope of the scorecard itself. Scorecard tests can however create whatever resources they @@ -37,7 +37,7 @@ of the configuration file format. Unless you are executing custom tests, you can just copy the provided example configuration file into your project. 2. Place the scorecard configuration file within your project -bundle directory a the following location `tests/scorecard/config.yaml`. +bundle directory at the following location `tests/scorecard/config.yaml`. You can override the default location of the configuration file by specifying the `--config` flag. 3. Execute the [`scorecard` command][cli-scorecard]. See the @@ -60,39 +60,42 @@ and used for running the scorecard pre-defined tests that ship with the SDK. 
A sample of the scorecard configuration file may look as follows: ```yaml -tests: -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - basic-check-spec - labels: - suite: basic - test: basic-check-spec-test -- image: quay.io/operator-framework/scorecard-test:dev - entrypoint: - - scorecard-test - - olm-bundle-validation - labels: - suite: olm - test: olm-bundle-validation-test +stages: +- parallel: true + tests: + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - basic-check-spec + labels: + suite: basic + test: basic-check-spec-test + - image: quay.io/operator-framework/scorecard-test:dev + entrypoint: + - scorecard-test + - olm-bundle-validation + labels: + suite: olm + test: olm-bundle-validation-test ``` -The configuration file defines each test that scorecard can execute. The -following fields of the scorecard configuration file define the test -as follows: +The configuration file defines the tests that scorecard executes. Tests are +grouped into stages for fine-grained control of [parallelism](#parallelism). 
+The following fields of the scorecard configuration file define the test as +follows: -| Config Field | Description | -| -------- | -------- | -| image | the test container image name that implements a test -| entrypoint | the command and arguments that are invoked in the test image to execute a test -| labels | scorecard-defined or custom labels that [select](#selecting-tests) which tests to run +| Config Field | Description +| ------------ | ----------- +| image | the test container image name that implements a test +| entrypoint | the command and arguments that are invoked in the test image to execute a test +| labels | scorecard-defined or custom labels that [select](#selecting-tests) which tests to run ### Command Args The scorecard command has the following syntax: ``` -operator-sdk alpha scorecard [bundle path] | [bundle image name] [flags] +operator-sdk alpha scorecard [flags] ``` The scorecard requires a positional argument that holds either the @@ -100,6 +103,24 @@ on-disk path to your operator bundle or the name of a bundle image. For further information about the flags see the [CLI documentation][cli-scorecard]. +## Parallelism + +The configuration file allows operator developers to define separate stages for +their tests. Stages run sequentially in the order they are defined in the +configuration file. A stage contains a list of tests and a configurable +`parallel` setting. + +By default (or when a stage explicitly sets `parallel` to `false`), tests in +a stage are run sequentially in the order they are defined in the configuration +file. Running tests one at a time is helpful to guarantee that no two tests +interact and conflict with each other. + +However, if tests are designed to be fully isolated, they can be parallelized. +To run a set of isolated tests in parallel, include them in the same stage and +set `parallel` to `true`. 
All tests in a parallel stage are executed +simultaneously, and scorecard waits for all of them to finish before proceeding +to the next stage. This can make your tests run much faster. + ## Selecting Tests  Tests are selected by setting the `--selector` CLI flag to @@ -225,7 +246,7 @@ Scorecard will execute custom tests if they follow these mandated conventions: * tests accept an entrypoint which include a command and arguments * tests produce v1alpha3 scorecard output in JSON format with no extraneous logging in the test output * tests can obtain the bundle contents at a shared mount point of /bundle - * tests can access the Kube API using an in-cluster client connection + * tests can access the Kubernetes API using an in-cluster client connection