diff --git a/.gitignore b/.gitignore index 7c504700..44c5d080 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,9 @@ # Temporary Build Files build/_output build/_test +# Goland +.idea + # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode ### Emacs ### # -*- mode: gitignore; -*- diff --git a/cmd/manager/kodata/knative-serving/dummy.go b/cmd/manager/kodata/knative-serving/dummy.go new file mode 100644 index 00000000..4e9a54b8 --- /dev/null +++ b/cmd/manager/kodata/knative-serving/dummy.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package knative_serving is a dummy buildable source file to satisfy `dep` +package knative_serving diff --git a/hack/update-deps.sh b/hack/update-deps.sh index 997832a1..ec0e1bc1 100755 --- a/hack/update-deps.sh +++ b/hack/update-deps.sh @@ -20,13 +20,13 @@ set -o pipefail source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh -cd ${REPO_ROOT_DIR} +cd "${REPO_ROOT_DIR}" # Ensure we have everything we need under vendor/ dep ensure -rm -rf $(find vendor/ -name 'OWNERS') -rm -rf $(find vendor/ -name '*_test.go') +find vendor/ -name 'OWNERS' -delete +find vendor/ -name '*_test.go' -delete update_licenses third_party/VENDOR-LICENSE "./cmd/*" diff --git a/test/context.go b/test/context.go new file mode 100644 index 00000000..1f95219e --- /dev/null +++ b/test/context.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import "testing" + +// Context represents a testing context that can be tuned to override specific +// parts of a suite +type Context struct { + t *testing.T + overrides []Specification + activeTests []string +} + +// T returns standard testing.T +func (ctx Context) T() *testing.T { + return ctx.t +} + +// Runner creates a new runner based on a given context +func (ctx Context) Runner() Runner { + return contextRunner{ + ctx: &ctx, + } +} + +// Run executes a given test or executes its override +func (runner contextRunner) Run(name string, testfunc func(t *testing.T)) bool { + ctx := runner.ctx + t := ctx.t + for _, spec := range ctx.overrides { + if spec.matchesNameInContext(name, ctx) { + t.Logf("Overriding %s test", spec.name()) + return t.Run(name, spec.testfunc(ctx)) + } + } + return t.Run(name, testfunc) +} + +// WithOverride adds specification to be executed instead of given test +func (ctx *Context) WithOverride(spec Specification) *Context { + ctx.overrides = append(ctx.overrides, spec) + return ctx +} + +// RunSuite will run a test suite within given context +func (ctx *Context) RunSuite(suite []Specification) { + for _, spec := range suite { + spec.run(ctx) + } +} + +// NewContext creates a new context +func NewContext(t *testing.T) *Context { + return &Context{ + t: t, + overrides: make([]Specification, 0), + activeTests: make([]string, 0), + } +} + +func (ctx *Context) push(testname string) { + ctx.activeTests = append(ctx.activeTests, testname) +} + +func (ctx *Context) pop() string { + // Top element + n := len(ctx.activeTests) - 1 + testname := ctx.activeTests[n] + ctx.activeTests = ctx.activeTests[:n] + return testname +} + +type contextRunner struct { + ctx *Context +} diff --git a/test/e2e/compliance.go b/test/e2e/compliance.go new file mode 100644 index 00000000..d23d7c48 --- /dev/null +++ b/test/e2e/compliance.go @@ -0,0 +1,26 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 
(the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import "knative.dev/serving-operator/test" + +// ComplianceSuite can be executed by productized code's test plan to assert +// compliance with upstream knative +func ComplianceSuite() []test.Specification { + return suite +} + +var suite = []test.Specification{ + test.NewContextualSpec("TestKnativeServingDeployment", testKnativeServingDeployment), +} diff --git a/test/e2e/compliance_test.go b/test/e2e/compliance_test.go new file mode 100644 index 00000000..1a301e48 --- /dev/null +++ b/test/e2e/compliance_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "knative.dev/serving-operator/test" + "testing" +) + +func TestComplianceSuite(t *testing.T) { + suite := ComplianceSuite() + + if len(suite) <= 0 { + t.Error("There should be some tests that are exported as compliance suite") + } +} + +var runState map[string]int + +const exampleJira = "XXXX-1234" + +func TestSkipOfSpecificPartOfTestSuite(t *testing.T) { + suite := exampleComplianceSuite() + + runState = map[string]int{ + "alpha": 0, + "beta": 0, + "gamma": 0, + } + + test. + NewContext(t). + WithOverride(test.Skipf("TestParent/beta", "Skip due to %v", exampleJira)). + RunSuite(suite) + + if runState["alpha"] != 1 { + t.Error("Alpha should be executed just once") + } + if runState["beta"] != 0 { + t.Error("Beta should be skipped, but wasn't") + } + if runState["gamma"] != 1 { + t.Error("Gamma should be executed just once") + } +} + +func exampleComplianceSuite() []test.Specification { + return []test.Specification{ + test.NewContextualSpec("TestParent", testParent), + } +} + +func testParent(ctx *test.Context) { + r := ctx.Runner() + + r.Run("alpha", func(t *testing.T) { + t.Log("Alpha is OK") + runState["alpha"]++ + }) + + r.Run("beta", func(t *testing.T) { + runState["beta"]++ + t.Errorf("Beta is failing, because %v", exampleJira) + }) + + r.Run("gamma", func(t *testing.T) { + t.Log("Gamma is OK") + runState["gamma"]++ + }) +} diff --git a/test/e2e/knativeservingdeployment.go b/test/e2e/knativeservingdeployment.go new file mode 100644 index 00000000..c6a9afb8 --- /dev/null +++ b/test/e2e/knativeservingdeployment.go @@ -0,0 +1,256 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "errors" + "fmt" + "path/filepath" + "runtime" + "testing" + + mf "github.com/jcrossley3/manifestival" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logstream" + "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" + "knative.dev/serving-operator/test" + "knative.dev/serving-operator/test/resources" +) + +func testKnativeServingDeployment(ctx *test.Context) { + + t := ctx.T() + r := ctx.Runner() + cancel := logstream.Start(t) + defer cancel() + clients := Setup(t) + + names := test.ResourceNames{ + KnativeServing: test.ServingOperatorName, + Namespace: test.ServingOperatorNamespace, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Create a KnativeServing + if _, err := resources.CreateKnativeServing(clients.KnativeServing(), names); err != nil { + t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) + } + + // Test if KnativeServing can reach the READY status + r.Run("create", func(t *testing.T) { + knativeServingVerify(t, clients, names) + }) + + r.Run("configure", func(t *testing.T) { + knativeServingVerify(t, clients, names) + knativeServingConfigure(t, clients, names) + }) + + // Delete the deployments one by one to see if they will be recreated. 
+ r.Run("restore", func(t *testing.T) { + knativeServingVerify(t, clients, names) + deploymentRecreation(t, clients, names) + }) + + // Delete the KnativeServing to see if all resources will be removed + r.Run("delete", func(t *testing.T) { + knativeServingVerify(t, clients, names) + knativeServingDelete(t, clients, names) + }) +} + +// knativeServingVerify verifies if the KnativeServing can reach the READY status. +func knativeServingVerify(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, + resources.IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) + } + +} + +// knativeServingConfigure verifies that KnativeServing config is set properly +func knativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { + // We'll arbitrarily choose logging and defaults config + loggingConfigKey := "logging" + loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, loggingConfigKey) + defaultsConfigKey := "defaults" + defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, defaultsConfigKey) + // Get the existing KS without any spec + ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + // Add config to its spec + ks.Spec = v1alpha1.KnativeServingSpec{ + Config: map[string]map[string]string{ + defaultsConfigKey: { + "revision-timeout-seconds": "200", + }, + loggingConfigKey: { + "loglevel.controller": "debug", + "loglevel.autoscaler": "debug", + }, + }, + } + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmaps have been updated + err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return 
m["revision-timeout-seconds"] == "200" + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) + } + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) + } + + // Delete a single key/value pair + delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, autoscalerKeyExists := m["loglevel.autoscaler"] + // deleted key/value pair should be removed from the target config map + return m["loglevel.controller"] == "debug" && !autoscalerKeyExists + }) + if err != nil { + t.Fatal("The operator failed to update the configmap") + } + + // Use an empty map as the value + ks.Spec.Config[defaultsConfigKey] = map[string]string{} + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated and does not contain any keys except "_example" + err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exampleExists := m["_example"] + return len(m) == 1 && exampleExists + }) + if err != nil { + t.Fatal("The operator failed to update the configmap") + } + + // Now remove the config from the spec and update + ks.Spec = v1alpha1.KnativeServingSpec{} + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to 
update: %v", names.KnativeServing, err) + } + // And verify the configmap entry is gone + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exists := m["loglevel.controller"] + return !exists + }) + if err != nil { + t.Fatal("The operator failed to revert the configmap") + } +} + +// deploymentRecreation verify whether all the deployments for knative serving are able to recreate, when they are deleted. +func deploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { + dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to get any deployment under the namespace %q: %v", + test.ServingOperatorNamespace, err) + } + if len(dpList.Items) == 0 { + t.Fatalf("No deployment under the namespace %q was found", + test.ServingOperatorNamespace) + } + // Delete the first deployment and verify the operator recreates it + deployment := dpList.Items[0] + if err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, + &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + } + + waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) + if err != nil { + // If the deployment is not found, we continue to wait for the availability. 
+ if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + return resources.IsDeploymentAvailable(dep) + }) + + if waitErr != nil { + t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, waitErr) + } + + if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, + resources.IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) + } + t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) +} + +// knativeServingDelete deletes the KnativeServing to see if all resources will be deleted +func knativeServingDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) + } + _, b, _, _ := runtime.Caller(0) + m, err := mf.NewManifest(filepath.Join((filepath.Dir(b)+"/.."), "config/"), false, clients.Config) + if err != nil { + t.Fatal("Failed to load manifest", err) + } + if err := verifyNoKnativeServings(clients); err != nil { + t.Fatal(err) + } + for _, u := range m.Resources { + if u.GetKind() == "Namespace" { + // The namespace should be skipped, because when the CR is removed, the Manifest to be removed has + // been modified, since the namespace can be injected. 
+ continue + } + waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + gvrs, _ := meta.UnsafeGuessKindToResource(u.GroupVersionKind()) + if _, err := clients.Dynamic.Resource(gvrs).Get(u.GetName(), metav1.GetOptions{}); apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }) + + if waitErr != nil { + t.Fatalf("The %s %s failed to be deleted: %v", u.GetKind(), u.GetName(), waitErr) + } + t.Logf("The %s %s has been deleted.", u.GetKind(), u.GetName()) + } +} + +func verifyNoKnativeServings(clients *test.Clients) error { + servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) + if err != nil { + return err + } + if len(servings.Items) > 0 { + return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") + } + return nil +} diff --git a/test/e2e/knativeservingdeployment_test.go b/test/e2e/knativeservingdeployment_test.go index ed985a1a..ce3171e8 100644 --- a/test/e2e/knativeservingdeployment_test.go +++ b/test/e2e/knativeservingdeployment_test.go @@ -16,241 +16,12 @@ limitations under the License. package e2e import ( - "errors" - "fmt" - "path/filepath" - "runtime" - "testing" - - mf "github.com/jcrossley3/manifestival" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "knative.dev/pkg/test/logstream" - "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" "knative.dev/serving-operator/test" - "knative.dev/serving-operator/test/resources" + "testing" ) // TestKnativeServingDeployment verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion. 
func TestKnativeServingDeployment(t *testing.T) { - cancel := logstream.Start(t) - defer cancel() - clients := Setup(t) - - names := test.ResourceNames{ - KnativeServing: test.ServingOperatorName, - Namespace: test.ServingOperatorNamespace, - } - - test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) - defer test.TearDown(clients, names) - - // Create a KnativeServing - if _, err := resources.CreateKnativeServing(clients.KnativeServing(), names); err != nil { - t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) - } - - // Test if KnativeServing can reach the READY status - t.Run("create", func(t *testing.T) { - knativeServingVerify(t, clients, names) - }) - - t.Run("configure", func(t *testing.T) { - knativeServingVerify(t, clients, names) - knativeServingConfigure(t, clients, names) - }) - - // Delete the deployments one by one to see if they will be recreated. - t.Run("restore", func(t *testing.T) { - knativeServingVerify(t, clients, names) - deploymentRecreation(t, clients, names) - }) - - // Delete the KnativeServing to see if all resources will be removed - t.Run("delete", func(t *testing.T) { - knativeServingVerify(t, clients, names) - knativeServingDelete(t, clients, names) - }) -} - -// knativeServingVerify verifies if the KnativeServing can reach the READY status. 
-func knativeServingVerify(t *testing.T, clients *test.Clients, names test.ResourceNames) { - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, - resources.IsKnativeServingReady); err != nil { - t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) - } - -} - -// knativeServingConfigure verifies that KnativeServing config is set properly -func knativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { - // We'll arbitrarily choose logging and defaults config - loggingConfigKey := "logging" - loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, loggingConfigKey) - defaultsConfigKey := "defaults" - defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, defaultsConfigKey) - // Get the existing KS without any spec - ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) - // Add config to its spec - ks.Spec = v1alpha1.KnativeServingSpec{ - Config: map[string]map[string]string{ - defaultsConfigKey: { - "revision-timeout-seconds": "200", - }, - loggingConfigKey: { - "loglevel.controller": "debug", - "loglevel.autoscaler": "debug", - }, - }, - } - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmaps have been updated - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - return m["revision-timeout-seconds"] == "200" - }) - if err != nil { - t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) - } - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" - }) - if err != nil { - t.Fatalf("The operator failed to update %s 
configmap", loggingConfigMapName) - } - - // Delete a single key/value pair - delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmap has been updated - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, autoscalerKeyExists := m["loglevel.autoscaler"] - // deleted key/value pair should be removed from the target config map - return m["loglevel.controller"] == "debug" && !autoscalerKeyExists - }) - if err != nil { - t.Fatal("The operator failed to update the configmap") - } - - // Use an empty map as the value - ks.Spec.Config[defaultsConfigKey] = map[string]string{} - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmap has been updated and does not contain any keys except "_example" - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, exampleExists := m["_example"] - return len(m) == 1 && exampleExists - }) - if err != nil { - t.Fatal("The operator failed to update the configmap") - } - - // Now remove the config from the spec and update - ks.Spec = v1alpha1.KnativeServingSpec{} - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // And verify the configmap entry is gone - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, exists := m["loglevel.controller"] - return !exists - }) - if err != nil { - t.Fatal("The operator failed to revert the configmap") - } -} - -// deploymentRecreation verify whether all the deployments for 
knative serving are able to recreate, when they are deleted. -func deploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { - dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) - if err != nil { - t.Fatalf("Failed to get any deployment under the namespace %q: %v", - test.ServingOperatorNamespace, err) - } - if len(dpList.Items) == 0 { - t.Fatalf("No deployment under the namespace %q was found", - test.ServingOperatorNamespace) - } - // Delete the first deployment and verify the operator recreates it - deployment := dpList.Items[0] - if err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, - &metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - } - - waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { - dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) - if err != nil { - // If the deployment is not found, we continue to wait for the availability. 
- if apierrs.IsNotFound(err) { - return false, nil - } - return false, err - } - return resources.IsDeploymentAvailable(dep) - }) - - if waitErr != nil { - t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, err) - } - - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, - resources.IsKnativeServingReady); err != nil { - t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) - } - t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) -} - -// knativeServingDelete deletes tha KnativeServing to see if all resources will be deleted -func knativeServingDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { - if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { - t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) - } - _, b, _, _ := runtime.Caller(0) - m, err := mf.NewManifest(filepath.Join((filepath.Dir(b)+"/.."), "config/"), false, clients.Config) - if err != nil { - t.Fatal("Failed to load manifest", err) - } - if err := verifyNoKnativeServings(clients); err != nil { - t.Fatal(err) - } - for _, u := range m.Resources { - if u.GetKind() == "Namespace" { - // The namespace should be skipped, because when the CR is removed, the Manifest to be removed has - // been modified, since the namespace can be injected. 
- continue - } - waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { - gvrs, _ := meta.UnsafeGuessKindToResource(u.GroupVersionKind()) - if _, err := clients.Dynamic.Resource(gvrs).Get(u.GetName(), metav1.GetOptions{}); apierrs.IsNotFound(err) { - return true, nil - } - return false, err - }) - - if waitErr != nil { - t.Fatalf("The %s %s failed to be deleted: %v", u.GetKind(), u.GetName(), waitErr) - } - t.Logf("The %s %s has been deleted.", u.GetKind(), u.GetName()) - } -} - -func verifyNoKnativeServings(clients *test.Clients) error { - servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) - if err != nil { - return err - } - if len(servings.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") - } - return nil + ctx := test.NewContext(t) + testKnativeServingDeployment(ctx) } diff --git a/test/e2e_flags.go b/test/e2e_flags.go index f7dc4f3f..e6b8b102 100644 --- a/test/e2e_flags.go +++ b/test/e2e_flags.go @@ -19,9 +19,19 @@ limitations under the License. package test -const ( +import "os" + +var ( // ServingOperatorNamespace is the default namespace for serving operator e2e tests - ServingOperatorNamespace = "operator-tests" + ServingOperatorNamespace = getenv("TEST_NAMESPACE", "operator-tests") // ServingOperatorName is the default operator name for serving operator e2e tests - ServingOperatorName = "knative-serving" + ServingOperatorName = getenv("TEST_RESOURCE", "knative-serving") ) + +func getenv(name, defaultValue string) string { + value, set := os.LookupEnv(name) + if !set { + value = defaultValue + } + return value +} diff --git a/test/runner.go b/test/runner.go new file mode 100644 index 00000000..10375557 --- /dev/null +++ b/test/runner.go @@ -0,0 +1,35 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import "testing" + +// Runner is a test runner that takes a context into consideration +type Runner interface { + Run(name string, testfunc func(t *testing.T)) bool +} + +func (spec Specification) run(ctx *Context) bool { + tt := ctx.t + if spec.contextual() { + cspec := spec.contextSpec + return tt.Run(cspec.name, func(t *testing.T) { + ctx.t = t + ctx.push(cspec.name) + defer ctx.pop() + cspec.testfunc(ctx) + }) + } + return tt.Run(spec.regularSpec.name, spec.regularSpec.testfunc) +} diff --git a/test/spec.go b/test/spec.go new file mode 100644 index 00000000..7a2ed309 --- /dev/null +++ b/test/spec.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "strings" + "testing" +) + +// Specification describes a test with name and method that will be used as a test +type Specification struct { + regularSpec *regularSpecification + contextSpec *contextualSpecification +} + +// NewSpec creates a new test specification +func NewSpec(name string, testfunc func(t *testing.T)) Specification { + return Specification{ + regularSpec: &regularSpecification{ + name: name, + testfunc: testfunc, + }, + contextSpec: nil, + } +} + +// NewContextualSpec creates a new test specification that uses context +func NewContextualSpec(name string, testfunc func(ctx *Context)) Specification { + return Specification{ + regularSpec: nil, + contextSpec: &contextualSpecification{ + name: name, + testfunc: testfunc, + }, + } +} + +// Skip will skip a test by its composite name +func Skip(testname string, args ...interface{}) Specification { + return NewSpec(testname, func(t *testing.T) { + t.Skip(args...) + }) +} + +// Skipf will skip a test by its composite name +func Skipf(testname string, format string, args ...interface{}) Specification { + return NewSpec(testname, func(t *testing.T) { + t.Skipf(format, args...) + }) +} + +type regularSpecification struct { + name string + testfunc func(t *testing.T) +} + +type contextualSpecification struct { + name string + testfunc func(ctx *Context) +} + +func (spec Specification) contextual() bool { + return spec.regularSpec == nil && spec.contextSpec != nil +} + +func (spec Specification) name() string { + if spec.contextual() { + return spec.contextSpec.name + } + return spec.regularSpec.name +} + +func (spec Specification) matchesNameInContext(name string, ctx *Context) bool { + candidate := strings.Join(ctx.activeTests[:], "/") + "/" + name + return spec.name() == candidate +} + +func (spec Specification) testfunc(ctx *Context) func(t *testing.T) { + if spec.contextual() { + return func(t *testing.T) { + spec.contextSpec.testfunc(ctx) + } + } + return spec.regularSpec.testfunc 
+}