From 58a70a4afed7966b8bdfb8ec7b3ca064406ef10b Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Wed, 5 Feb 2020 13:18:49 -0500 Subject: [PATCH 1/4] Add upgrade tests into the e2e-tests-latest-serving This PR adds the tests to verify the correct number and names of knative serving deployments. The test tag postupgrade is added, marking the tests to run after upgrade to the latest HEAD of operator, with the latest generated manifest of knative serving. --- test/common/common.go | 230 +++++++++++++++++++++ test/e2e-upgrade-tests.sh | 6 +- test/e2e/e2e.go | 36 ---- test/e2e/knativeservingdeployment_test.go | 211 +------------------ test/upgrade/knativeservingupgrade_test.go | 98 +++++++++ 5 files changed, 342 insertions(+), 239 deletions(-) create mode 100644 test/common/common.go delete mode 100644 test/e2e/e2e.go create mode 100644 test/upgrade/knativeservingupgrade_test.go diff --git a/test/common/common.go b/test/common/common.go new file mode 100644 index 00000000..f8fb7240 --- /dev/null +++ b/test/common/common.go @@ -0,0 +1,230 @@ +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "fmt" + "errors" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" + "knative.dev/serving-operator/test/resources" + "path/filepath" + "runtime" + "testing" + + mf "github.com/jcrossley3/manifestival" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. + // https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + pkgTest "knative.dev/pkg/test" + "knative.dev/serving-operator/test" +) + +// Setup creates the client objects needed in the e2e tests. +func Setup(t *testing.T) *test.Clients { + clients, err := test.NewClients( + pkgTest.Flags.Kubeconfig, + pkgTest.Flags.Cluster) + if err != nil { + t.Fatalf("Couldn't initialize clients: %v", err) + } + return clients +} + + +// KnativeServingVerify verifies if the KnativeServing can reach the READY status. 
+func KnativeServingVerify(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, + resources.IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) + } + +} + +// KnativeServingConfigure verifies that KnativeServing config is set properly +func KnativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { + // We'll arbitrarily choose logging and defaults config + loggingConfigKey := "logging" + loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, loggingConfigKey) + defaultsConfigKey := "defaults" + defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, defaultsConfigKey) + // Get the existing KS without any spec + ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + // Add config to its spec + ks.Spec = v1alpha1.KnativeServingSpec{ + Config: map[string]map[string]string{ + defaultsConfigKey: { + "revision-timeout-seconds": "200", + }, + loggingConfigKey: { + "loglevel.controller": "debug", + "loglevel.autoscaler": "debug", + }, + }, + } + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmaps have been updated + err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["revision-timeout-seconds"] == "200" + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) + } + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" + }) + if err != nil { + t.Fatalf("The operator failed to update %s 
configmap", loggingConfigMapName) + } + + // Delete a single key/value pair + delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, autoscalerKeyExists := m["loglevel.autoscaler"] + // deleted key/value pair should be removed from the target config map + return m["loglevel.controller"] == "debug" && !autoscalerKeyExists + }) + if err != nil { + t.Fatal("The operator failed to update the configmap") + } + + // Use an empty map as the value + ks.Spec.Config[defaultsConfigKey] = map[string]string{} + // Update it + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated and does not contain any keys except "_example" + err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exampleExists := m["_example"] + return len(m) == 1 && exampleExists + }) + if err != nil { + t.Fatal("The operator failed to update the configmap") + } + + // Now remove the config from the spec and update + ks.Spec = v1alpha1.KnativeServingSpec{} + if ks, err = clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // And verify the configmap entry is gone + err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exists := m["loglevel.controller"] + return !exists + }) + if err != nil { + t.Fatal("The operator failed to revert the configmap") + } +} + +// DeploymentRecreation verifies whether all the deployments for 
knative serving are able to recreate, when they are deleted. +func DeploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { + dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to get any deployment under the namespace %q: %v", + test.ServingOperatorNamespace, err) + } + if len(dpList.Items) == 0 { + t.Fatalf("No deployment under the namespace %q was found", + test.ServingOperatorNamespace) + } + // Delete the first deployment and verify the operator recreates it + deployment := dpList.Items[0] + if err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, + &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + } + + waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) + if err != nil { + // If the deployment is not found, we continue to wait for the availability. 
+ if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + return resources.IsDeploymentAvailable(dep) + }) + + if waitErr != nil { + t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, err) + } + + if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, + resources.IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) + } + t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) +} + +// KnativeServingDelete deletes the KnativeServing to see if all resources will be deleted +func KnativeServingDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) + } + err := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + _, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }) + if err != nil { + t.Fatal("Timed out waiting on KnativeServing to delete", err) + } + _, b, _, _ := runtime.Caller(0) + m, err := mf.NewManifest(filepath.Join((filepath.Dir(b)+"/.."), "config/"), false, clients.Config) + if err != nil { + t.Fatal("Failed to load manifest", err) + } + if err := verifyNoKnativeServings(clients); err != nil { + t.Fatal(err) + } + for _, u := range m.Resources { + if u.GetKind() == "Namespace" { + // The namespace should be skipped, because when the CR is removed, the Manifest to be removed has + // been modified, since the namespace can be injected. 
+ continue + } + gvrs, _ := meta.UnsafeGuessKindToResource(u.GroupVersionKind()) + if _, err := clients.Dynamic.Resource(gvrs).Get(u.GetName(), metav1.GetOptions{}); !apierrs.IsNotFound(err) { + t.Fatalf("The %s %s failed to be deleted: %v", u.GetKind(), u.GetName(), err) + } + } +} + +func verifyNoKnativeServings(clients *test.Clients) error { + servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) + if err != nil { + return err + } + if len(servings.Items) > 0 { + return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") + } + return nil +} diff --git a/test/e2e-upgrade-tests.sh b/test/e2e-upgrade-tests.sh index 2585490f..44a5523f 100755 --- a/test/e2e-upgrade-tests.sh +++ b/test/e2e-upgrade-tests.sh @@ -74,12 +74,14 @@ function generate_latest_serving_manifest() { # Skip installing istio as an add-on initialize $@ --skip-istio-addon +TIMEOUT=20m + # If we got this far, the operator installed Knative Serving of the latest source code. header "Running tests for Knative Serving Operator" failed=0 -# Run the integration tests -go_test_e2e -timeout=20m ./test/e2e || failed=1 +# Run the postupgrade tests +go_test_e2e -tags=postupgrade -timeout=${TIMEOUT} ./test/upgrade || failed=1 # Require that tests succeeded. (( failed )) && fail_test diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go deleted file mode 100644 index a2385dbc..00000000 --- a/test/e2e/e2e.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2019 The Knative Authors -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "testing" - - // Mysteriously required to support GCP auth (required by k8s libs). - // Apparently just importing it is enough. @_@ side effects @_@. - // https://github.com/kubernetes/client-go/issues/242 - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - pkgTest "knative.dev/pkg/test" - "knative.dev/serving-operator/test" -) - -// Setup creates the client objects needed in the e2e tests. -func Setup(t *testing.T) *test.Clients { - clients, err := test.NewClients( - pkgTest.Flags.Kubeconfig, - pkgTest.Flags.Cluster) - if err != nil { - t.Fatalf("Couldn't initialize clients: %v", err) - } - return clients -} diff --git a/test/e2e/knativeservingdeployment_test.go b/test/e2e/knativeservingdeployment_test.go index fc39629a..6fd0d6d7 100644 --- a/test/e2e/knativeservingdeployment_test.go +++ b/test/e2e/knativeservingdeployment_test.go @@ -1,7 +1,7 @@ // +build e2e /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,28 +16,19 @@ limitations under the License. package e2e import ( - "errors" - "fmt" - "path/filepath" - "runtime" "testing" - mf "github.com/jcrossley3/manifestival" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "knative.dev/pkg/test/logstream" - "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" "knative.dev/serving-operator/test" "knative.dev/serving-operator/test/resources" + "knative.dev/serving-operator/test/common" ) // TestKnativeServingDeployment verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion. 
func TestKnativeServingDeployment(t *testing.T) { cancel := logstream.Start(t) defer cancel() - clients := Setup(t) + clients := common.Setup(t) names := test.ResourceNames{ KnativeServing: test.ServingOperatorName, @@ -54,205 +45,23 @@ func TestKnativeServingDeployment(t *testing.T) { // Test if KnativeServing can reach the READY status t.Run("create", func(t *testing.T) { - knativeServingVerify(t, clients, names) + common.KnativeServingVerify(t, clients, names) }) t.Run("configure", func(t *testing.T) { - knativeServingVerify(t, clients, names) - knativeServingConfigure(t, clients, names) + common.KnativeServingVerify(t, clients, names) + common.KnativeServingConfigure(t, clients, names) }) // Delete the deployments one by one to see if they will be recreated. t.Run("restore", func(t *testing.T) { - knativeServingVerify(t, clients, names) - deploymentRecreation(t, clients, names) + common.KnativeServingVerify(t, clients, names) + common.DeploymentRecreation(t, clients, names) }) // Delete the KnativeServing to see if all resources will be removed t.Run("delete", func(t *testing.T) { - knativeServingVerify(t, clients, names) - knativeServingDelete(t, clients, names) + common.KnativeServingVerify(t, clients, names) + common.KnativeServingDelete(t, clients, names) }) } - -// knativeServingVerify verifies if the KnativeServing can reach the READY status. 
-func knativeServingVerify(t *testing.T, clients *test.Clients, names test.ResourceNames) { - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, - resources.IsKnativeServingReady); err != nil { - t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) - } - -} - -// knativeServingConfigure verifies that KnativeServing config is set properly -func knativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { - // We'll arbitrarily choose logging and defaults config - loggingConfigKey := "logging" - loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, loggingConfigKey) - defaultsConfigKey := "defaults" - defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, defaultsConfigKey) - // Get the existing KS without any spec - ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) - // Add config to its spec - ks.Spec = v1alpha1.KnativeServingSpec{ - Config: map[string]map[string]string{ - defaultsConfigKey: { - "revision-timeout-seconds": "200", - }, - loggingConfigKey: { - "loglevel.controller": "debug", - "loglevel.autoscaler": "debug", - }, - }, - } - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmaps have been updated - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - return m["revision-timeout-seconds"] == "200" - }) - if err != nil { - t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) - } - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" - }) - if err != nil { - t.Fatalf("The operator failed to update %s 
configmap", loggingConfigMapName) - } - - // Delete a single key/value pair - delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmap has been updated - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, autoscalerKeyExists := m["loglevel.autoscaler"] - // deleted key/value pair should be removed from the target config map - return m["loglevel.controller"] == "debug" && !autoscalerKeyExists - }) - if err != nil { - t.Fatal("The operator failed to update the configmap") - } - - // Use an empty map as the value - ks.Spec.Config[defaultsConfigKey] = map[string]string{} - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // Verify the relevant configmap has been updated and does not contain any keys except "_example" - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, exampleExists := m["_example"] - return len(m) == 1 && exampleExists - }) - if err != nil { - t.Fatal("The operator failed to update the configmap") - } - - // Now remove the config from the spec and update - ks.Spec = v1alpha1.KnativeServingSpec{} - if ks, err = clients.KnativeServing().Update(ks); err != nil { - t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) - } - // And verify the configmap entry is gone - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { - _, exists := m["loglevel.controller"] - return !exists - }) - if err != nil { - t.Fatal("The operator failed to revert the configmap") - } -} - -// deploymentRecreation verify whether all the deployments for 
knative serving are able to recreate, when they are deleted. -func deploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { - dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) - if err != nil { - t.Fatalf("Failed to get any deployment under the namespace %q: %v", - test.ServingOperatorNamespace, err) - } - if len(dpList.Items) == 0 { - t.Fatalf("No deployment under the namespace %q was found", - test.ServingOperatorNamespace) - } - // Delete the first deployment and verify the operator recreates it - deployment := dpList.Items[0] - if err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, - &metav1.DeleteOptions{}); err != nil { - t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - } - - waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { - dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) - if err != nil { - // If the deployment is not found, we continue to wait for the availability. 
- if apierrs.IsNotFound(err) { - return false, nil - } - return false, err - } - return resources.IsDeploymentAvailable(dep) - }) - - if waitErr != nil { - t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, err) - } - - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, - resources.IsKnativeServingReady); err != nil { - t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) - } - t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) -} - -// knativeServingDelete deletes tha KnativeServing to see if all resources will be deleted -func knativeServingDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { - if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { - t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) - } - err := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { - _, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) - if apierrs.IsNotFound(err) { - return true, nil - } - return false, err - }) - if err != nil { - t.Fatal("Timed out waiting on KnativeServing to delete", err) - } - _, b, _, _ := runtime.Caller(0) - m, err := mf.NewManifest(filepath.Join((filepath.Dir(b)+"/.."), "config/"), false, clients.Config) - if err != nil { - t.Fatal("Failed to load manifest", err) - } - if err := verifyNoKnativeServings(clients); err != nil { - t.Fatal(err) - } - for _, u := range m.Resources { - if u.GetKind() == "Namespace" { - // The namespace should be skipped, because when the CR is removed, the Manifest to be removed has - // been modified, since the namespace can be injected. 
- continue - } - gvrs, _ := meta.UnsafeGuessKindToResource(u.GroupVersionKind()) - if _, err := clients.Dynamic.Resource(gvrs).Get(u.GetName(), metav1.GetOptions{}); !apierrs.IsNotFound(err) { - t.Fatalf("The %s %s failed to be deleted: %v", u.GetKind(), u.GetName(), err) - } - } -} - -func verifyNoKnativeServings(clients *test.Clients) error { - servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) - if err != nil { - return err - } - if len(servings.Items) > 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") - } - return nil -} diff --git a/test/upgrade/knativeservingupgrade_test.go b/test/upgrade/knativeservingupgrade_test.go new file mode 100644 index 00000000..e418cf0d --- /dev/null +++ b/test/upgrade/knativeservingupgrade_test.go @@ -0,0 +1,98 @@ +// +build postupgrade + +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/test/logstream" + "knative.dev/serving-operator/test" + "knative.dev/serving-operator/test/resources" + "knative.dev/serving-operator/test/common" +) + +// TestKnativeServingUpgrade verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion. 
+func TestKnativeServingUpgrade(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + clients := common.Setup(t) + + names := test.ResourceNames{ + KnativeServing: test.ServingOperatorName, + Namespace: test.ServingOperatorNamespace, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Create a KnativeServing + if _, err := resources.CreateKnativeServing(clients.KnativeServing(), names); err != nil { + t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) + } + + // Test if KnativeServing can reach the READY status + t.Run("create", func(t *testing.T) { + common.KnativeServingVerify(t, clients, names) + knativeServingVerifyDeployment(t, clients, names) + }) + + t.Run("configure", func(t *testing.T) { + common.KnativeServingVerify(t, clients, names) + common.KnativeServingConfigure(t, clients, names) + }) + + // Delete the deployments one by one to see if they will be recreated. + t.Run("restore", func(t *testing.T) { + common.KnativeServingVerify(t, clients, names) + common.DeploymentRecreation(t, clients, names) + }) + + // Delete the KnativeServing to see if all resources will be removed + t.Run("delete", func(t *testing.T) { + common.KnativeServingVerify(t, clients, names) + common.KnativeServingDelete(t, clients, names) + }) +} + +// knativeServingVerifyDeployment verify whether the deployments have the correct number and names. +func knativeServingVerifyDeployment(t *testing.T, clients *test.Clients, names test.ResourceNames) { + // Knative Serving has 6 deployments. 
+ expectedNumDeployments := 6 + deploys := []string{"networking-istio", "webhook", "controller", "activator", "autoscaler-hpa", "autoscaler"} + dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) + assertEqual(t, err, nil) + assertEqual(t, expectedNumDeployments, len(dpList.Items)) + for _, deployment := range dpList.Items { + assertEqual(t, stringInList(deployment.Name, deploys), true) + } +} + +func assertEqual(t *testing.T, actual, expected interface{}) { + if actual == expected { + return + } + t.Fatalf("expected does not equal actual. \nExpected: %v\nActual: %v", expected, actual) +} + +func stringInList(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} From 707a6e93566bec8cde286f44db8e1e85d436bc64 Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Wed, 5 Feb 2020 14:03:38 -0500 Subject: [PATCH 2/4] Install the latest release of the operator and then upgrade --- test/e2e-common.sh | 21 +++++---- test/e2e-tests.sh | 1 + test/e2e-upgrade-tests.sh | 44 ++++++++++++++++++- test/resources/knativeserving.go | 19 +++++--- ...go => servingoperator_postupgrade_test.go} | 36 +++++++-------- 5 files changed, 86 insertions(+), 35 deletions(-) rename test/upgrade/{knativeservingupgrade_test.go => servingoperator_postupgrade_test.go} (66%) diff --git a/test/e2e-common.sh b/test/e2e-common.sh index a0ae6af8..f54a6af5 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -20,9 +20,9 @@ source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh # Latest serving release. This is intentionally hardcoded for now, but # will need the ability to test against the latest successful serving # CI runs in the future. 
-readonly LATEST_SERVING_RELEASE_VERSION=0.6.0 +readonly LATEST_SERVING_RELEASE_VERSION=$(git describe --match "v[0-9]*" --abbrev=0) # Istio version we test with -readonly ISTIO_VERSION=1.1.3 +readonly ISTIO_VERSION="1.4.2" # Test without Istio mesh enabled readonly ISTIO_MESH=0 # Namespace used for tests @@ -43,18 +43,20 @@ function istio_yaml() { local istio_mesh=$2 local suffix="" if [[ $istio_mesh -eq 0 ]]; then - suffix="-lean" + suffix="ci-no-mesh" + else + suffix="ci-mesh" fi - echo "third_party/istio-${istio_version}/istio${suffix}.yaml" + echo "third_party/istio-${istio_version}/istio-${suffix}.yaml" } # Install Istio. function install_istio() { - local base_url="https://raw.githubusercontent.com/knative/serving/v${LATEST_SERVING_RELEASE_VERSION}" + local base_url="https://raw.githubusercontent.com/knative/serving/${LATEST_SERVING_RELEASE_VERSION}" # Decide the Istio configuration to install. if [[ -z "$ISTIO_VERSION" ]]; then - # Defaults to 1.1-latest - ISTIO_VERSION=1.1-latest + # Defaults to 1.4-latest + ISTIO_VERSION="1.4-latest" fi if [[ -z "$ISTIO_MESH" ]]; then # Defaults to using mesh. @@ -76,10 +78,13 @@ function install_istio() { kubectl apply -f "${INSTALL_ISTIO_YAML}" || return 1 } -function install_serving_operator() { +function create_namespace() { echo ">> Creating test namespaces" + # All the custom resources and Knative Serving resources are created under this TEST_NAMESPACE. 
kubectl create namespace $TEST_NAMESPACE +} +function install_serving_operator() { header "Installing Knative Serving operator" # Deploy the operator ko apply -f config/ diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 52ce31fb..0f1c3163 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -30,6 +30,7 @@ source $(dirname $0)/e2e-common.sh function knative_setup() { install_istio || fail_test "Istio installation failed" + create_namespace install_serving_operator } diff --git a/test/e2e-upgrade-tests.sh b/test/e2e-upgrade-tests.sh index 44a5523f..ad5362ec 100755 --- a/test/e2e-upgrade-tests.sh +++ b/test/e2e-upgrade-tests.sh @@ -36,8 +36,48 @@ source $(dirname $0)/e2e-common.sh OPERATOR_DIR=$(dirname $0)/.. KNATIVE_SERVING_DIR=${OPERATOR_DIR}/.. -function knative_setup() { +function install_latest_operator_release() { + header "Installing Knative Serving operator latest public release" + local url="https://github.com/knative/serving-operator/releases/download/${LATEST_SERVING_RELEASE_VERSION}" + local yaml="serving-operator.yaml" + + local RELEASE_YAML="$(mktemp)" + wget "${url}/${yaml}" -O "${RELEASE_YAML}" \ + || fail_test "Unable to download latest Knative Serving Operator release." 
+ install_istio || fail_test "Istio installation failed" + kubectl apply -f "${RELEASE_YAML}" || fail_test "Knative Serving Operator latest release installation failed" + create_custom_resource + wait_until_pods_running ${TEST_NAMESPACE} +} + +function create_custom_resource() { + echo ">> Creating the custom resource of Knative Serving:" + cat < Date: Mon, 10 Feb 2020 12:07:27 -0500 Subject: [PATCH 3/4] Split the long functions and rename the common package --- test/client/setup.go | 37 +++++ test/e2e/knativeservingdeployment_test.go | 18 +-- test/resources/knativeserving.go | 23 ++- .../{common/common.go => resources/verify.go} | 151 +++++++++--------- .../servingoperator_postupgrade_test.go | 21 +-- 5 files changed, 156 insertions(+), 94 deletions(-) create mode 100644 test/client/setup.go rename test/{common/common.go => resources/verify.go} (59%) diff --git a/test/client/setup.go b/test/client/setup.go new file mode 100644 index 00000000..5018eaad --- /dev/null +++ b/test/client/setup.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "testing" + + // Mysteriously required to support GCP auth (required by k8s libs). + // Apparently just importing it is enough. @_@ side effects @_@. 
+ // https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + pkgTest "knative.dev/pkg/test" + "knative.dev/serving-operator/test" +) + +// Setup creates the client objects needed in the e2e tests. +func Setup(t *testing.T) *test.Clients { + clients, err := test.NewClients( + pkgTest.Flags.Kubeconfig, + pkgTest.Flags.Cluster) + if err != nil { + t.Fatalf("Couldn't initialize clients: %v", err) + } + return clients +} diff --git a/test/e2e/knativeservingdeployment_test.go b/test/e2e/knativeservingdeployment_test.go index 6fd0d6d7..38a6b743 100644 --- a/test/e2e/knativeservingdeployment_test.go +++ b/test/e2e/knativeservingdeployment_test.go @@ -20,15 +20,15 @@ import ( "knative.dev/pkg/test/logstream" "knative.dev/serving-operator/test" + "knative.dev/serving-operator/test/client" "knative.dev/serving-operator/test/resources" - "knative.dev/serving-operator/test/common" ) // TestKnativeServingDeployment verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion. func TestKnativeServingDeployment(t *testing.T) { cancel := logstream.Start(t) defer cancel() - clients := common.Setup(t) + clients := client.Setup(t) names := test.ResourceNames{ KnativeServing: test.ServingOperatorName, @@ -45,23 +45,23 @@ func TestKnativeServingDeployment(t *testing.T) { // Test if KnativeServing can reach the READY status t.Run("create", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) }) t.Run("configure", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) - common.KnativeServingConfigure(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.KSOperatorCRVerifyConfiguration(t, clients, names) }) // Delete the deployments one by one to see if they will be recreated. 
t.Run("restore", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) - common.DeploymentRecreation(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.DeleteAndVerifyDeployments(t, clients, names) }) // Delete the KnativeServing to see if all resources will be removed t.Run("delete", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) - common.KnativeServingDelete(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.KSOperatorCRDelete(t, clients, names) }) } diff --git a/test/resources/knativeserving.go b/test/resources/knativeserving.go index 0d6cb9c7..421fe7cb 100644 --- a/test/resources/knativeserving.go +++ b/test/resources/knativeserving.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,13 +24,14 @@ import ( "time" "github.com/pkg/errors" - apierrs "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "knative.dev/pkg/test/logging" "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" servingv1alpha1 "knative.dev/serving-operator/pkg/client/clientset/versioned/typed/serving/v1alpha1" @@ -42,6 +43,10 @@ const ( Interval = 10 * time.Second // Timeout specifies the timeout for the function PollImmediate to reach a certain status. Timeout = 5 * time.Minute + // LoggingConfigKey specifies the key name of the logging config map. + // DefaultsConfigKey specifies the key name of the default config map. 
+ DefaultsConfigKey = "defaults" ) // WaitForKnativeServingState polls the status of the KnativeServing called name @@ -110,3 +115,17 @@ func getDeploymentStatus(d *v1.Deployment) corev1.ConditionStatus { } return "unknown" } + +func getTestKSOperatorCRSpec() v1alpha1.KnativeServingSpec { + return v1alpha1.KnativeServingSpec{ + Config: map[string]map[string]string{ + DefaultsConfigKey: { + "revision-timeout-seconds": "200", + }, + LoggingConfigKey: { + "loglevel.controller": "debug", + "loglevel.autoscaler": "debug", + }, + }, + } +} diff --git a/test/common/common.go b/test/resources/verify.go similarity index 59% rename from test/common/common.go rename to test/resources/verify.go index f8fb7240..90b1d482 100644 --- a/test/common/common.go +++ b/test/resources/verify.go @@ -11,14 +11,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package common +package resources import ( - "fmt" "errors" - "k8s.io/apimachinery/pkg/util/wait" - "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" - "knative.dev/serving-operator/test/resources" + "fmt" "path/filepath" "runtime" "testing" @@ -27,122 +24,130 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - // Mysteriously required to support GCP auth (required by k8s libs). - // Apparently just importing it is enough. @_@ side effects @_@. - // https://github.com/kubernetes/client-go/issues/242 - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - pkgTest "knative.dev/pkg/test" + "k8s.io/apimachinery/pkg/util/wait" + + "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" "knative.dev/serving-operator/test" ) -// Setup creates the client objects needed in the e2e tests. 
-func Setup(t *testing.T) *test.Clients { - clients, err := test.NewClients( - pkgTest.Flags.Kubeconfig, - pkgTest.Flags.Cluster) - if err != nil { - t.Fatalf("Couldn't initialize clients: %v", err) - } - return clients -} - - -// KnativeServingVerify verifies if the KnativeServing can reach the READY status. -func KnativeServingVerify(t *testing.T, clients *test.Clients, names test.ResourceNames) { - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, - resources.IsKnativeServingReady); err != nil { +// KSOperatorCRVerifyStatus verifies if the KnativeServing can reach the READY status. +func KSOperatorCRVerifyStatus(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if _, err := WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, + IsKnativeServingReady); err != nil { t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) } } -// KnativeServingConfigure verifies that KnativeServing config is set properly -func KnativeServingConfigure(t *testing.T, clients *test.Clients, names test.ResourceNames) { +// KSOperatorCRVerifyConfiguration verifies that KnativeServing config is set properly +func KSOperatorCRVerifyConfiguration(t *testing.T, clients *test.Clients, names test.ResourceNames) { // We'll arbitrarily choose logging and defaults config - loggingConfigKey := "logging" - loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, loggingConfigKey) - defaultsConfigKey := "defaults" - defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, defaultsConfigKey) + loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, LoggingConfigKey) + defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, DefaultsConfigKey) // Get the existing KS without any spec ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + if err != nil { + t.Fatalf("The operator does not have an 
existing KS operator CR: %s", names.KnativeServing) + } // Add config to its spec - ks.Spec = v1alpha1.KnativeServingSpec{ - Config: map[string]map[string]string{ - defaultsConfigKey: { - "revision-timeout-seconds": "200", - }, - loggingConfigKey: { - "loglevel.controller": "debug", - "loglevel.autoscaler": "debug", - }, - }, - } - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { + ks.Spec = getTestKSOperatorCRSpec() + + // verify the default config map + ks = verifyDefaultConfig(t, ks, defaultsConfigMapName, clients, names) + + // verify the logging config map + verifyLoggingConfig(t, ks, loggingConfigMapName, clients, names) + + // Delete a single key/value pair + ks = verifySingleKeyDeletion(t, ks, LoggingConfigKey, loggingConfigMapName, clients, names) + + // Use an empty map as the value + ks = verifyEmptyKey(t, ks, DefaultsConfigKey, defaultsConfigMapName, clients, names) + + // Now remove the config from the spec and update + verifyEmptySpec(t, ks, loggingConfigMapName, clients, names) +} + +func verifyDefaultConfig(t *testing.T, ks *v1alpha1.KnativeServing, defaultsConfigMapName string, clients *test.Clients, + names test.ResourceNames) *v1alpha1.KnativeServing { + ks, err := clients.KnativeServing().Update(ks) + if err != nil { t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) } // Verify the relevant configmaps have been updated - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + err = WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { return m["revision-timeout-seconds"] == "200" }) if err != nil { t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) } - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return ks +} + +func verifyLoggingConfig(t *testing.T, ks *v1alpha1.KnativeServing, 
loggingConfigMapName string, clients *test.Clients, + names test.ResourceNames) { + err := WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" }) if err != nil { t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) } +} - // Delete a single key/value pair +func verifySingleKeyDeletion(t *testing.T, ks *v1alpha1.KnativeServing, loggingConfigKey string, + loggingConfigMapName string, clients *test.Clients, names test.ResourceNames) *v1alpha1.KnativeServing { delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { + ks, err := clients.KnativeServing().Update(ks) + if err != nil { t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) } // Verify the relevant configmap has been updated - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + err = WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { _, autoscalerKeyExists := m["loglevel.autoscaler"] // deleted key/value pair should be removed from the target config map return m["loglevel.controller"] == "debug" && !autoscalerKeyExists }) if err != nil { - t.Fatal("The operator failed to update the configmap") + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) } + return ks +} - // Use an empty map as the value +func verifyEmptyKey(t *testing.T, ks *v1alpha1.KnativeServing, defaultsConfigKey string, + defaultsConfigMapName string, clients *test.Clients, names test.ResourceNames) *v1alpha1.KnativeServing { ks.Spec.Config[defaultsConfigKey] = map[string]string{} - // Update it - if ks, err = clients.KnativeServing().Update(ks); err != nil { + ks, err := clients.KnativeServing().Update(ks) + if err != nil { t.Fatalf("KnativeServing %q 
failed to update: %v", names.KnativeServing, err) } // Verify the relevant configmap has been updated and does not contain any keys except "_example" - err = resources.WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + err = WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { _, exampleExists := m["_example"] return len(m) == 1 && exampleExists }) if err != nil { - t.Fatal("The operator failed to update the configmap") + t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) } + return ks +} - // Now remove the config from the spec and update +func verifyEmptySpec(t *testing.T, ks *v1alpha1.KnativeServing, loggingConfigMapName string, clients *test.Clients, + names test.ResourceNames) { ks.Spec = v1alpha1.KnativeServingSpec{} - if ks, err = clients.KnativeServing().Update(ks); err != nil { + if _, err := clients.KnativeServing().Update(ks); err != nil { t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) } - // And verify the configmap entry is gone - err = resources.WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + err := WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { _, exists := m["loglevel.controller"] return !exists }) if err != nil { - t.Fatal("The operator failed to revert the configmap") + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) } } -// DeploymentRecreation verify whether all the deployments for knative serving are able to recreate, when they are deleted. -func DeploymentRecreation(t *testing.T, clients *test.Clients, names test.ResourceNames) { +// DeleteAndVerifyDeployments verify whether all the deployments for knative serving are able to recreate, when they are deleted. 
+func DeleteAndVerifyDeployments(t *testing.T, clients *test.Clients, names test.ResourceNames) { dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) if err != nil { t.Fatalf("Failed to get any deployment under the namespace %q: %v", @@ -159,7 +164,7 @@ func DeploymentRecreation(t *testing.T, clients *test.Clients, names test.Resour t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) } - waitErr := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + waitErr := wait.PollImmediate(Interval, Timeout, func() (bool, error) { dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) if err != nil { // If the deployment is not found, we continue to wait for the availability. @@ -168,26 +173,26 @@ func DeploymentRecreation(t *testing.T, clients *test.Clients, names test.Resour } return false, err } - return resources.IsDeploymentAvailable(dep) + return IsDeploymentAvailable(dep) }) if waitErr != nil { t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, err) } - if _, err := resources.WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, - resources.IsKnativeServingReady); err != nil { + if _, err := WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, + IsKnativeServingReady); err != nil { t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) } t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) } -// KnativeServingDelete deletes tha KnativeServing to see if all resources will be deleted -func KnativeServingDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { +// KSOperatorCRDelete deletes the KnativeServing to see if all resources will be deleted +func KSOperatorCRDelete(t 
*testing.T, clients *test.Clients, names test.ResourceNames) { if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) } - err := wait.PollImmediate(resources.Interval, resources.Timeout, func() (bool, error) { + err := wait.PollImmediate(Interval, Timeout, func() (bool, error) { _, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) if apierrs.IsNotFound(err) { return true, nil @@ -202,7 +207,7 @@ func KnativeServingDelete(t *testing.T, clients *test.Clients, names test.Resour if err != nil { t.Fatal("Failed to load manifest", err) } - if err := verifyNoKnativeServings(clients); err != nil { + if err := verifyNoKSOperatorCR(clients); err != nil { t.Fatal(err) } for _, u := range m.Resources { @@ -218,7 +223,7 @@ func KnativeServingDelete(t *testing.T, clients *test.Clients, names test.Resour } } -func verifyNoKnativeServings(clients *test.Clients) error { +func verifyNoKSOperatorCR(clients *test.Clients) error { servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) if err != nil { return err diff --git a/test/upgrade/servingoperator_postupgrade_test.go b/test/upgrade/servingoperator_postupgrade_test.go index 02cb6846..8436c9f1 100644 --- a/test/upgrade/servingoperator_postupgrade_test.go +++ b/test/upgrade/servingoperator_postupgrade_test.go @@ -19,10 +19,11 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/test/logstream" "knative.dev/serving-operator/test" + "knative.dev/serving-operator/test/client" "knative.dev/serving-operator/test/resources" - "knative.dev/serving-operator/test/common" ) // TestKnativeServingPostUpgrade verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion @@ -30,7 +31,7 @@ import ( func TestKnativeServingPostUpgrade(t *testing.T) { cancel := logstream.Start(t) defer cancel() - clients := common.Setup(t) 
+ clients := client.Setup(t) names := test.ResourceNames{ KnativeServing: test.ServingOperatorName, @@ -47,29 +48,29 @@ func TestKnativeServingPostUpgrade(t *testing.T) { // Test if KnativeServing can reach the READY status after upgrade t.Run("create", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) }) // Verify if resources match the latest requirement after upgrade t.Run("verify resources", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) // TODO: We only verify the deployment, but we need to add other resources as well, like ServiceAccount, ClusterRoleBinding, etc. - ExpectedDeployments := []string{"networking-istio", "webhook", "controller", "activator", "autoscaler-hpa", + expectedDeployments := []string{"networking-istio", "webhook", "controller", "activator", "autoscaler-hpa", "autoscaler"} - knativeServingVerifyDeployment(t, clients, names, ExpectedDeployments) + ksVerifyDeployment(t, clients, names, expectedDeployments) }) // TODO: We will add one or sections here to run the tests tagged with postupgrade in knative serving. // Delete the KnativeServing to see if all resources will be removed after upgrade t.Run("delete", func(t *testing.T) { - common.KnativeServingVerify(t, clients, names) - common.KnativeServingDelete(t, clients, names) + resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.KSOperatorCRDelete(t, clients, names) }) } -// knativeServingVerifyDeployment verify whether the deployments have the correct number and names. -func knativeServingVerifyDeployment(t *testing.T, clients *test.Clients, names test.ResourceNames, +// ksVerifyDeployment verify whether the deployments have the correct number and names. 
+func ksVerifyDeployment(t *testing.T, clients *test.Clients, names test.ResourceNames, expectedDeployments []string) { dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) assertEqual(t, err, nil) From 59849d4350b0586e998f67970df6d29fe2e6e99a Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Tue, 11 Feb 2020 10:13:55 -0500 Subject: [PATCH 4/4] Refactor the PR based on the comments --- test/e2e-common.sh | 9 --------- test/e2e-upgrade-tests.sh | 9 ++++----- test/e2e/knativeservingdeployment_test.go | 10 +++++----- test/resources/knativeserving.go | 4 ++-- test/resources/verify.go | 5 ++--- test/upgrade/servingoperator_postupgrade_test.go | 8 ++++---- 6 files changed, 17 insertions(+), 28 deletions(-) diff --git a/test/e2e-common.sh b/test/e2e-common.sh index f54a6af5..8aa7264c 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -53,15 +53,6 @@ function istio_yaml() { # Install Istio. function install_istio() { local base_url="https://raw.githubusercontent.com/knative/serving/${LATEST_SERVING_RELEASE_VERSION}" - # Decide the Istio configuration to install. - if [[ -z "$ISTIO_VERSION" ]]; then - # Defaults to 1.4-latest - ISTIO_VERSION="1.4-latest" - fi - if [[ -z "$ISTIO_MESH" ]]; then - # Defaults to using mesh. - ISTIO_MESH=1 - fi INSTALL_ISTIO_CRD_YAML="${base_url}/$(istio_crds_yaml $ISTIO_VERSION)" INSTALL_ISTIO_YAML="${base_url}/$(istio_yaml $ISTIO_VERSION $ISTIO_MESH)" diff --git a/test/e2e-upgrade-tests.sh b/test/e2e-upgrade-tests.sh index ad5362ec..430d6e58 100755 --- a/test/e2e-upgrade-tests.sh +++ b/test/e2e-upgrade-tests.sh @@ -38,15 +38,14 @@ KNATIVE_SERVING_DIR=${OPERATOR_DIR}/.. 
function install_latest_operator_release() { header "Installing Knative Serving operator latest public release" - local url="https://github.com/knative/serving-operator/releases/download/${LATEST_SERVING_RELEASE_VERSION}" - local yaml="serving-operator.yaml" + local full_url="https://github.com/knative/serving-operator/releases/download/${LATEST_SERVING_RELEASE_VERSION}/serving-operator.yaml" - local RELEASE_YAML="$(mktemp)" - wget "${url}/${yaml}" -O "${RELEASE_YAML}" \ + local release_yaml="$(mktemp)" + wget "${full_url}" -O "${release_yaml}" \ || fail_test "Unable to download latest Knative Serving Operator release." install_istio || fail_test "Istio installation failed" - kubectl apply -f "${RELEASE_YAML}" || fail_test "Knative Serving Operator latest release installation failed" + kubectl apply -f "${release_yaml}" || fail_test "Knative Serving Operator latest release installation failed" create_custom_resource wait_until_pods_running ${TEST_NAMESPACE} } diff --git a/test/e2e/knativeservingdeployment_test.go b/test/e2e/knativeservingdeployment_test.go index 38a6b743..39711e71 100644 --- a/test/e2e/knativeservingdeployment_test.go +++ b/test/e2e/knativeservingdeployment_test.go @@ -39,29 +39,29 @@ func TestKnativeServingDeployment(t *testing.T) { defer test.TearDown(clients, names) // Create a KnativeServing - if _, err := resources.CreateKnativeServing(clients.KnativeServing(), names); err != nil { + if _, err := resources.EnsureKnativeServingExists(clients.KnativeServing(), names); err != nil { t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) } // Test if KnativeServing can reach the READY status t.Run("create", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) }) t.Run("configure", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) 
resources.KSOperatorCRVerifyConfiguration(t, clients, names) }) // Delete the deployments one by one to see if they will be recreated. t.Run("restore", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) resources.DeleteAndVerifyDeployments(t, clients, names) }) // Delete the KnativeServing to see if all resources will be removed t.Run("delete", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) resources.KSOperatorCRDelete(t, clients, names) }) } diff --git a/test/resources/knativeserving.go b/test/resources/knativeserving.go index 421fe7cb..c438b632 100644 --- a/test/resources/knativeserving.go +++ b/test/resources/knativeserving.go @@ -69,8 +69,8 @@ func WaitForKnativeServingState(clients servingv1alpha1.KnativeServingInterface, return lastState, nil } -// CreateKnativeServing creates a KnativeServing with the name names.KnativeServing under the namespace names.Namespace. -func CreateKnativeServing(clients servingv1alpha1.KnativeServingInterface, names test.ResourceNames) (*v1alpha1.KnativeServing, error) { +// EnsureKnativeServingExists creates a KnativeServing with the name names.KnativeServing under the namespace names.Namespace, if it does not exist. +func EnsureKnativeServingExists(clients servingv1alpha1.KnativeServingInterface, names test.ResourceNames) (*v1alpha1.KnativeServing, error) { // If this function is called by the upgrade tests, we only create the custom resource, if it does not exist. 
ks, err := clients.Get(names.KnativeServing, metav1.GetOptions{}) if apierrs.IsNotFound(err) { diff --git a/test/resources/verify.go b/test/resources/verify.go index 90b1d482..51ffb55e 100644 --- a/test/resources/verify.go +++ b/test/resources/verify.go @@ -30,13 +30,12 @@ import ( "knative.dev/serving-operator/test" ) -// KSOperatorCRVerifyStatus verifies if the KnativeServing can reach the READY status. -func KSOperatorCRVerifyStatus(t *testing.T, clients *test.Clients, names test.ResourceNames) { +// AssertKSOperatorCRReadyStatus verifies if the KnativeServing reaches the READY status. +func AssertKSOperatorCRReadyStatus(t *testing.T, clients *test.Clients, names test.ResourceNames) { if _, err := WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, IsKnativeServingReady); err != nil { t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) } - } // KSOperatorCRVerifyConfiguration verifies that KnativeServing config is set properly diff --git a/test/upgrade/servingoperator_postupgrade_test.go b/test/upgrade/servingoperator_postupgrade_test.go index 8436c9f1..b4221c40 100644 --- a/test/upgrade/servingoperator_postupgrade_test.go +++ b/test/upgrade/servingoperator_postupgrade_test.go @@ -42,18 +42,18 @@ func TestKnativeServingPostUpgrade(t *testing.T) { defer test.TearDown(clients, names) // Create a KnativeServing custom resource, if it does not exist - if _, err := resources.CreateKnativeServing(clients.KnativeServing(), names); err != nil { + if _, err := resources.EnsureKnativeServingExists(clients.KnativeServing(), names); err != nil { t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) } // Test if KnativeServing can reach the READY status after upgrade t.Run("create", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) }) // Verify if resources match the latest requirement after upgrade 
t.Run("verify resources", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) // TODO: We only verify the deployment, but we need to add other resources as well, like ServiceAccount, ClusterRoleBinding, etc. expectedDeployments := []string{"networking-istio", "webhook", "controller", "activator", "autoscaler-hpa", "autoscaler"} @@ -64,7 +64,7 @@ func TestKnativeServingPostUpgrade(t *testing.T) { // Delete the KnativeServing to see if all resources will be removed after upgrade t.Run("delete", func(t *testing.T) { - resources.KSOperatorCRVerifyStatus(t, clients, names) + resources.AssertKSOperatorCRReadyStatus(t, clients, names) resources.KSOperatorCRDelete(t, clients, names) }) }