diff --git a/test/e2e/e2e.go b/test/client/setup.go similarity index 95% rename from test/e2e/e2e.go rename to test/client/setup.go index a2385dbc..5018eaad 100644 --- a/test/e2e/e2e.go +++ b/test/client/setup.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -11,7 +11,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package e2e +package client import ( "testing" @@ -20,6 +20,7 @@ import ( // Apparently just importing it is enough. @_@ side effects @_@. // https://github.com/kubernetes/client-go/issues/242 _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + pkgTest "knative.dev/pkg/test" "knative.dev/serving-operator/test" ) diff --git a/test/e2e-common.sh b/test/e2e-common.sh index a0ae6af8..8aa7264c 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -20,9 +20,9 @@ source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh # Latest serving release. This is intentionally hardcoded for now, but # will need the ability to test against the latest successful serving # CI runs in the future. -readonly LATEST_SERVING_RELEASE_VERSION=0.6.0 +readonly LATEST_SERVING_RELEASE_VERSION=$(git describe --match "v[0-9]*" --abbrev=0) # Istio version we test with -readonly ISTIO_VERSION=1.1.3 +readonly ISTIO_VERSION="1.4.2" # Test without Istio mesh enabled readonly ISTIO_MESH=0 # Namespace used for tests @@ -43,23 +43,16 @@ function istio_yaml() { local istio_mesh=$2 local suffix="" if [[ $istio_mesh -eq 0 ]]; then - suffix="-lean" + suffix="ci-no-mesh" + else + suffix="ci-mesh" fi - echo "third_party/istio-${istio_version}/istio${suffix}.yaml" + echo "third_party/istio-${istio_version}/istio-${suffix}.yaml" } # Install Istio. 
function install_istio() { - local base_url="https://raw.githubusercontent.com/knative/serving/v${LATEST_SERVING_RELEASE_VERSION}" - # Decide the Istio configuration to install. - if [[ -z "$ISTIO_VERSION" ]]; then - # Defaults to 1.1-latest - ISTIO_VERSION=1.1-latest - fi - if [[ -z "$ISTIO_MESH" ]]; then - # Defaults to using mesh. - ISTIO_MESH=1 - fi + local base_url="https://raw.githubusercontent.com/knative/serving/${LATEST_SERVING_RELEASE_VERSION}" INSTALL_ISTIO_CRD_YAML="${base_url}/$(istio_crds_yaml $ISTIO_VERSION)" INSTALL_ISTIO_YAML="${base_url}/$(istio_yaml $ISTIO_VERSION $ISTIO_MESH)" @@ -76,10 +69,13 @@ function install_istio() { kubectl apply -f "${INSTALL_ISTIO_YAML}" || return 1 } -function install_serving_operator() { +function create_namespace() { echo ">> Creating test namespaces" + # All the custom resources and Knative Serving resources are created under this TEST_NAMESPACE. kubectl create namespace $TEST_NAMESPACE +} +function install_serving_operator() { header "Installing Knative Serving operator" # Deploy the operator ko apply -f config/ diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 52ce31fb..0f1c3163 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -30,6 +30,7 @@ source $(dirname $0)/e2e-common.sh function knative_setup() { install_istio || fail_test "Istio installation failed" + create_namespace install_serving_operator } diff --git a/test/e2e-upgrade-tests.sh b/test/e2e-upgrade-tests.sh index 2585490f..430d6e58 100755 --- a/test/e2e-upgrade-tests.sh +++ b/test/e2e-upgrade-tests.sh @@ -36,8 +36,47 @@ source $(dirname $0)/e2e-common.sh OPERATOR_DIR=$(dirname $0)/.. KNATIVE_SERVING_DIR=${OPERATOR_DIR}/.. 
-function knative_setup() { +function install_latest_operator_release() { + header "Installing Knative Serving operator latest public release" + local full_url="https://github.com/knative/serving-operator/releases/download/${LATEST_SERVING_RELEASE_VERSION}/serving-operator.yaml" + + local release_yaml="$(mktemp)" + wget "${full_url}" -O "${release_yaml}" \ + || fail_test "Unable to download latest Knative Serving Operator release." + install_istio || fail_test "Istio installation failed" + kubectl apply -f "${release_yaml}" || fail_test "Knative Serving Operator latest release installation failed" + create_custom_resource + wait_until_pods_running ${TEST_NAMESPACE} +} + +function create_custom_resource() { + echo ">> Creating the custom resource of Knative Serving:" + cat < 0 { - return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") - } - return nil -} diff --git a/test/resources/knativeserving.go b/test/resources/knativeserving.go index a1501732..c438b632 100644 --- a/test/resources/knativeserving.go +++ b/test/resources/knativeserving.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,10 +26,12 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "knative.dev/pkg/test/logging" "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" servingv1alpha1 "knative.dev/serving-operator/pkg/client/clientset/versioned/typed/serving/v1alpha1" @@ -41,6 +43,10 @@ const ( Interval = 10 * time.Second // Timeout specifies the timeout for the function PollImmediate to reach a certain status. 
Timeout = 5 * time.Minute + // LoggingConfigKey specifies the key name of the logging config map. + LoggingConfigKey = "logging" + // DefaultsConfigKey specifies the key name of the default config map. + DefaultsConfigKey = "defaults" ) // WaitForKnativeServingState polls the status of the KnativeServing called name @@ -63,16 +69,20 @@ return lastState, nil } -// CreateKnativeServing creates a KnativeServing with the name names.KnativeServing under the namespace names.Namespace. -func CreateKnativeServing(clients servingv1alpha1.KnativeServingInterface, names test.ResourceNames) (*v1alpha1.KnativeServing, error) { - ks := &v1alpha1.KnativeServing{ - ObjectMeta: metav1.ObjectMeta{ - Name: names.KnativeServing, - Namespace: names.Namespace, - }, +// EnsureKnativeServingExists creates a KnativeServing with the name names.KnativeServing under the namespace names.Namespace, if it does not exist. +func EnsureKnativeServingExists(clients servingv1alpha1.KnativeServingInterface, names test.ResourceNames) (*v1alpha1.KnativeServing, error) { + // If this function is called by the upgrade tests, we only create the custom resource, if it does not exist.
+ ks, err := clients.Get(names.KnativeServing, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + ks := &v1alpha1.KnativeServing{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.KnativeServing, + Namespace: names.Namespace, + }, + } + return clients.Create(ks) } - svc, err := clients.Create(ks) - return svc, err + return ks, err } // WaitForConfigMap takes a condition function that evaluates ConfigMap data @@ -105,3 +115,17 @@ func getDeploymentStatus(d *v1.Deployment) corev1.ConditionStatus { } return "unknown" } + +func getTestKSOperatorCRSpec() v1alpha1.KnativeServingSpec { + return v1alpha1.KnativeServingSpec{ + Config: map[string]map[string]string{ + DefaultsConfigKey: { + "revision-timeout-seconds": "200", + }, + LoggingConfigKey: { + "loglevel.controller": "debug", + "loglevel.autoscaler": "debug", + }, + }, + } +} diff --git a/test/resources/verify.go b/test/resources/verify.go new file mode 100644 index 00000000..51ffb55e --- /dev/null +++ b/test/resources/verify.go @@ -0,0 +1,234 @@ +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resources + +import ( + "errors" + "fmt" + "path/filepath" + "runtime" + "testing" + + mf "github.com/jcrossley3/manifestival" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + + "knative.dev/serving-operator/pkg/apis/serving/v1alpha1" + "knative.dev/serving-operator/test" +) + +// AssertKSOperatorCRReadyStatus verifies if the KnativeServing reaches the READY status. +func AssertKSOperatorCRReadyStatus(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if _, err := WaitForKnativeServingState(clients.KnativeServing(), names.KnativeServing, + IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to get to the READY status: %v", names.KnativeServing, err) + } +} + +// KSOperatorCRVerifyConfiguration verifies that KnativeServing config is set properly +func KSOperatorCRVerifyConfiguration(t *testing.T, clients *test.Clients, names test.ResourceNames) { + // We'll arbitrarily choose logging and defaults config + loggingConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, LoggingConfigKey) + defaultsConfigMapName := fmt.Sprintf("%s/config-%s", names.Namespace, DefaultsConfigKey) + // Get the existing KS without any spec + ks, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + if err != nil { + t.Fatalf("The operator does not have an existing KS operator CR: %s", names.KnativeServing) + } + // Add config to its spec + ks.Spec = getTestKSOperatorCRSpec() + + // verify the default config map + ks = verifyDefaultConfig(t, ks, defaultsConfigMapName, clients, names) + + // verify the logging config map + verifyLoggingConfig(t, ks, loggingConfigMapName, clients, names) + + // Delete a single key/value pair + ks = verifySingleKeyDeletion(t, ks, LoggingConfigKey, loggingConfigMapName, clients, names) + + // Use an empty map as the value + ks = verifyEmptyKey(t, ks, 
DefaultsConfigKey, defaultsConfigMapName, clients, names) + + // Now remove the config from the spec and update + verifyEmptySpec(t, ks, loggingConfigMapName, clients, names) +} + +func verifyDefaultConfig(t *testing.T, ks *v1alpha1.KnativeServing, defaultsConfigMapName string, clients *test.Clients, + names test.ResourceNames) *v1alpha1.KnativeServing { + ks, err := clients.KnativeServing().Update(ks) + if err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmaps have been updated + err = WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["revision-timeout-seconds"] == "200" + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) + } + return ks +} + +func verifyLoggingConfig(t *testing.T, ks *v1alpha1.KnativeServing, loggingConfigMapName string, clients *test.Clients, + names test.ResourceNames) { + err := WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + return m["loglevel.controller"] == "debug" && m["loglevel.autoscaler"] == "debug" + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) + } +} + +func verifySingleKeyDeletion(t *testing.T, ks *v1alpha1.KnativeServing, loggingConfigKey string, + loggingConfigMapName string, clients *test.Clients, names test.ResourceNames) *v1alpha1.KnativeServing { + delete(ks.Spec.Config[loggingConfigKey], "loglevel.autoscaler") + ks, err := clients.KnativeServing().Update(ks) + if err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated + err = WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, autoscalerKeyExists := m["loglevel.autoscaler"] + // deleted key/value pair should be removed from the target config map + 
return m["loglevel.controller"] == "debug" && !autoscalerKeyExists + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) + } + return ks +} + +func verifyEmptyKey(t *testing.T, ks *v1alpha1.KnativeServing, defaultsConfigKey string, + defaultsConfigMapName string, clients *test.Clients, names test.ResourceNames) *v1alpha1.KnativeServing { + ks.Spec.Config[defaultsConfigKey] = map[string]string{} + ks, err := clients.KnativeServing().Update(ks) + if err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + // Verify the relevant configmap has been updated and does not contain any keys except "_example" + err = WaitForConfigMap(defaultsConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exampleExists := m["_example"] + return len(m) == 1 && exampleExists + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", defaultsConfigMapName) + } + return ks +} + +func verifyEmptySpec(t *testing.T, ks *v1alpha1.KnativeServing, loggingConfigMapName string, clients *test.Clients, + names test.ResourceNames) { + ks.Spec = v1alpha1.KnativeServingSpec{} + if _, err := clients.KnativeServing().Update(ks); err != nil { + t.Fatalf("KnativeServing %q failed to update: %v", names.KnativeServing, err) + } + err := WaitForConfigMap(loggingConfigMapName, clients.KubeClient.Kube, func(m map[string]string) bool { + _, exists := m["loglevel.controller"] + return !exists + }) + if err != nil { + t.Fatalf("The operator failed to update %s configmap", loggingConfigMapName) + } +} + +// DeleteAndVerifyDeployments verify whether all the deployments for knative serving are able to recreate, when they are deleted. 
+func DeleteAndVerifyDeployments(t *testing.T, clients *test.Clients, names test.ResourceNames) { + dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to get any deployment under the namespace %q: %v", + test.ServingOperatorNamespace, err) + } + if len(dpList.Items) == 0 { + t.Fatalf("No deployment under the namespace %q was found", + test.ServingOperatorNamespace) + } + // Delete the first deployment and verify the operator recreates it + deployment := dpList.Items[0] + if err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, + &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + } + + waitErr := wait.PollImmediate(Interval, Timeout, func() (bool, error) { + dep, err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) + if err != nil { + // If the deployment is not found, we continue to wait for the availability. 
+ if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + return IsDeploymentAvailable(dep) + }) + + if waitErr != nil { + t.Fatalf("The deployment %s/%s failed to reach the desired state: %v", deployment.Namespace, deployment.Name, waitErr) + } + + if _, err := WaitForKnativeServingState(clients.KnativeServing(), test.ServingOperatorName, + IsKnativeServingReady); err != nil { + t.Fatalf("KnativeService %q failed to reach the desired state: %v", test.ServingOperatorName, err) + } + t.Logf("The deployment %s/%s reached the desired state.", deployment.Namespace, deployment.Name) +} + +// KSOperatorCRDelete deletes the KnativeServing to see if all resources will be deleted +func KSOperatorCRDelete(t *testing.T, clients *test.Clients, names test.ResourceNames) { + if err := clients.KnativeServing().Delete(names.KnativeServing, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("KnativeServing %q failed to delete: %v", names.KnativeServing, err) + } + err := wait.PollImmediate(Interval, Timeout, func() (bool, error) { + _, err := clients.KnativeServing().Get(names.KnativeServing, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }) + if err != nil { + t.Fatal("Timed out waiting on KnativeServing to delete", err) + } + _, b, _, _ := runtime.Caller(0) + m, err := mf.NewManifest(filepath.Join((filepath.Dir(b)+"/.."), "config/"), false, clients.Config) + if err != nil { + t.Fatal("Failed to load manifest", err) + } + if err := verifyNoKSOperatorCR(clients); err != nil { + t.Fatal(err) + } + for _, u := range m.Resources { + if u.GetKind() == "Namespace" { + // The namespace should be skipped, because when the CR is removed, the Manifest to be removed has + // been modified, since the namespace can be injected.
+ continue + } + gvrs, _ := meta.UnsafeGuessKindToResource(u.GroupVersionKind()) + if _, err := clients.Dynamic.Resource(gvrs).Get(u.GetName(), metav1.GetOptions{}); !apierrs.IsNotFound(err) { + t.Fatalf("The %s %s failed to be deleted: %v", u.GetKind(), u.GetName(), err) + } + } +} + +func verifyNoKSOperatorCR(clients *test.Clients) error { + servings, err := clients.KnativeServingAll().List(metav1.ListOptions{}) + if err != nil { + return err + } + if len(servings.Items) > 0 { + return errors.New("Unable to verify cluster-scoped resources are deleted if any KnativeServing exists") + } + return nil +} diff --git a/test/upgrade/servingoperator_postupgrade_test.go b/test/upgrade/servingoperator_postupgrade_test.go new file mode 100644 index 00000000..b4221c40 --- /dev/null +++ b/test/upgrade/servingoperator_postupgrade_test.go @@ -0,0 +1,97 @@ +// +build postupgrade + +/* +Copyright 2020 The Knative Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/test/logstream" + "knative.dev/serving-operator/test" + "knative.dev/serving-operator/test/client" + "knative.dev/serving-operator/test/resources" +) + +// TestKnativeServingPostUpgrade verifies the KnativeServing creation, deployment recreation, and KnativeServing deletion +// after the operator upgrades with the latest generated manifest of Knative Serving. 
+func TestKnativeServingPostUpgrade(t *testing.T) { + cancel := logstream.Start(t) + defer cancel() + clients := client.Setup(t) + + names := test.ResourceNames{ + KnativeServing: test.ServingOperatorName, + Namespace: test.ServingOperatorNamespace, + } + + test.CleanupOnInterrupt(func() { test.TearDown(clients, names) }) + defer test.TearDown(clients, names) + + // Create a KnativeServing custom resource, if it does not exist + if _, err := resources.EnsureKnativeServingExists(clients.KnativeServing(), names); err != nil { + t.Fatalf("KnativeService %q failed to create: %v", names.KnativeServing, err) + } + + // Test if KnativeServing can reach the READY status after upgrade + t.Run("create", func(t *testing.T) { + resources.AssertKSOperatorCRReadyStatus(t, clients, names) + }) + + // Verify if resources match the latest requirement after upgrade + t.Run("verify resources", func(t *testing.T) { + resources.AssertKSOperatorCRReadyStatus(t, clients, names) + // TODO: We only verify the deployment, but we need to add other resources as well, like ServiceAccount, ClusterRoleBinding, etc. + expectedDeployments := []string{"networking-istio", "webhook", "controller", "activator", "autoscaler-hpa", + "autoscaler"} + ksVerifyDeployment(t, clients, names, expectedDeployments) + }) + + // TODO: We will add one or more sections here to run the tests tagged with postupgrade in knative serving. + + // Delete the KnativeServing to see if all resources will be removed after upgrade + t.Run("delete", func(t *testing.T) { + resources.AssertKSOperatorCRReadyStatus(t, clients, names) + resources.KSOperatorCRDelete(t, clients, names) + }) +} + +// ksVerifyDeployment verifies whether the deployments have the correct number and names.
+func ksVerifyDeployment(t *testing.T, clients *test.Clients, names test.ResourceNames, + expectedDeployments []string) { + dpList, err := clients.KubeClient.Kube.AppsV1().Deployments(names.Namespace).List(metav1.ListOptions{}) + assertEqual(t, err, nil) + assertEqual(t, len(dpList.Items), len(expectedDeployments)) + for _, deployment := range dpList.Items { + assertEqual(t, stringInList(deployment.Name, expectedDeployments), true) + } +} + +func assertEqual(t *testing.T, actual, expected interface{}) { + if actual == expected { + return + } + t.Fatalf("expected does not equal actual. \nExpected: %v\nActual: %v", expected, actual) +} + +func stringInList(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +}