From 2141b6aa1a776f08ed9ae49c012ac8c8b0389c16 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 24 Aug 2022 18:38:24 -0700 Subject: [PATCH 01/36] Initial Commit --- test/e2e/e2e_test.go | 24 +- test/e2e/framework/cluster.go | 26 +- test/e2e/join_leave_member_test.go | 31 +-- test/e2e/manifests/test-secret.yaml | 8 - test/e2e/utils/helper.go | 122 ++++++++- .../{ => utils}/manifests/test-configmap.yaml | 2 +- .../manifests/test-configmap2.ns.yaml} | 4 +- test/e2e/utils/manifests/test-configmap2.yaml | 9 + test/e2e/{ => utils}/manifests/test-crd.yaml | 0 .../manifests/test-deployment.yaml | 0 .../{ => utils}/manifests/test-namespace.yaml | 2 +- test/e2e/utils/manifests/test-secret.yaml | 8 + .../{ => utils}/manifests/test-service.yaml | 0 .../manifests/test-serviceaccount.yaml | 0 test/e2e/{ => utils}/work_api_test_utils.go | 40 +-- test/e2e/work_api_e2e_test.go | 254 ++++++++++++++++++ test/e2e/work_api_test.go | 202 +++++++------- test/e2e/work_load_test.go | 23 +- 18 files changed, 556 insertions(+), 199 deletions(-) delete mode 100644 test/e2e/manifests/test-secret.yaml rename test/e2e/{ => utils}/manifests/test-configmap.yaml (88%) rename test/e2e/{manifests/test-configmap.ns.yaml => utils/manifests/test-configmap2.ns.yaml} (66%) create mode 100644 test/e2e/utils/manifests/test-configmap2.yaml rename test/e2e/{ => utils}/manifests/test-crd.yaml (100%) rename test/e2e/{ => utils}/manifests/test-deployment.yaml (100%) rename test/e2e/{ => utils}/manifests/test-namespace.yaml (64%) create mode 100644 test/e2e/utils/manifests/test-secret.yaml rename test/e2e/{ => utils}/manifests/test-service.yaml (100%) rename test/e2e/{ => utils}/manifests/test-serviceaccount.yaml (100%) rename test/e2e/{ => utils}/work_api_test_utils.go (64%) create mode 100644 test/e2e/work_api_e2e_test.go diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index ea96d4416..65337ac5b 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -5,11 +5,15 @@ Licensed under the MIT license. package e2e import ( + "fmt" + "go.goms.io/fleet/pkg/utils" + testutils "go.goms.io/fleet/test/e2e/utils" "os" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -27,14 +31,19 @@ var ( MemberCluster = framework.NewCluster(memberClusterName, scheme) hubURL string scheme = runtime.NewScheme() - genericCodecs = serializer.NewCodecFactory(scheme) - genericCodec = genericCodecs.UniversalDeserializer() + memberNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + workNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + workResourceNs = testutils.NewNamespace("resource-namespace") + + genericCodecs = serializer.NewCodecFactory(scheme) + genericCodec = genericCodecs.UniversalDeserializer() ) func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) utilruntime.Must(workv1alpha1.AddToScheme(scheme)) + utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) } func TestE2E(t *testing.T) { @@ -57,4 +66,15 @@ var _ = BeforeSuite(func() { MemberCluster.HubURL = hubURL framework.GetClusterClient(MemberCluster) + testutils.CreateNamespace(*MemberCluster, memberNs) + + testutils.CreateNamespace(*HubCluster, workNs) + testutils.CreateNamespace(*MemberCluster, workResourceNs) +}) + +var _ = AfterSuite(func() { + testutils.DeleteNamespace(*MemberCluster, memberNs) + + testutils.DeleteNamespace(*HubCluster, workNs) + testutils.DeleteNamespace(*MemberCluster, workResourceNs) }) diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index 5df432772..9e0604623 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -8,6 +8,7 @@ import ( "os" "github.com/onsi/gomega" + apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" @@ -21,14 +22,16 @@ var ( kubeconfigPath = os.Getenv("KUBECONFIG") ) +// Cluster object defines the required clients based on the kubeconfig of the test cluster. 
type Cluster struct { - Scheme *runtime.Scheme - KubeClient client.Client - KubeClientSet kubernetes.Interface - DynamicClient dynamic.Interface - ClusterName string - HubURL string - RestMapper meta.RESTMapper + Scheme *runtime.Scheme + KubeClient client.Client + KubeClientSet kubernetes.Interface + APIExtensionClient *apiextension.Clientset + DynamicClient dynamic.Interface + ClusterName string + HubURL string + RestMapper meta.RESTMapper } func NewCluster(name string, scheme *runtime.Scheme) *Cluster { @@ -51,13 +54,16 @@ func GetClusterClient(cluster *Cluster) { gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) cluster.KubeClientSet, err = kubernetes.NewForConfig(restConfig) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) + + cluster.APIExtensionClient, err = apiextension.NewForConfig(restConfig) + gomega.Expect(err).Should(gomega.Succeed()) cluster.DynamicClient, err = dynamic.NewForConfig(restConfig) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) cluster.RestMapper, err = apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) } func GetClientConfig(cluster *Cluster) clientcmd.ClientConfig { diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index eab4b7b62..99d0fdb68 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -5,33 +5,24 @@ Licensed under the MIT license. package e2e import ( - "context" "fmt" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var _ = Describe("Join/leave member cluster testing", func() { var mc *v1alpha1.MemberCluster var sa *corev1.ServiceAccount - var memberNS *corev1.Namespace + var memberNsName string var imc *v1alpha1.InternalMemberCluster BeforeEach(func() { - memberNS = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) - By("prepare resources in member cluster") - // create testing NS in member cluster - testutils.CreateNamespace(*MemberCluster, memberNS) - sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNS.Name) + memberNsName = fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName) + sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNsName) testutils.CreateServiceAccount(*MemberCluster, sa) By("deploy member cluster in the hub cluster") @@ -39,7 +30,7 @@ var _ = Describe("Join/leave member cluster testing", func() { testutils.CreateMemberCluster(*HubCluster, mc) By("check if internal member cluster created in the hub cluster") - imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNS.Name) + imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNsName) testutils.WaitInternalMemberCluster(*HubCluster, imc) By("check if member cluster is marked as readyToJoin") @@ -47,16 +38,8 @@ var _ = Describe("Join/leave member cluster testing", func() { }) AfterEach(func() { - testutils.DeleteNamespace(*MemberCluster, memberNS) - Eventually(func() bool { - err := MemberCluster.KubeClient.Get(context.TODO(), 
types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS) - return apierrors.IsNotFound(err) - }, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true)) + testutils.DeleteServiceAccount(*MemberCluster, sa) testutils.DeleteMemberCluster(*HubCluster, mc) - Eventually(func() bool { - err := HubCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS) - return apierrors.IsNotFound(err) - }, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true)) }) It("Join & Leave flow is successful ", func() { diff --git a/test/e2e/manifests/test-secret.yaml b/test/e2e/manifests/test-secret.yaml deleted file mode 100644 index 707b1613d..000000000 --- a/test/e2e/manifests/test-secret.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: test-secret - namespace: default -data: - somekey: Q2xpZW50SWQ6IDUxOTEwNTY4LTM0YzktNGQ0ZS1iODA1LTNmNTY3NWQyMDdiYwpDbGllbnRTZWNyZXQ6IDZSLThRfkJvSDNNYm1+eGJpaDhmNVZibHBkWGxzeGQyRnp+WXhjWjYKVGVuYW50SWQ6IDcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0NwpTdWJzY3JpcHRpb25JZDogMmIwM2JmYjgtZTg4NS00NTY2LWE2MmEtOTA5YTExZDcxNjkyClJlc291cmNlR3JvdXA6IGNhcmF2ZWwtZGVtbw== -type: generic diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 2b58a9a96..68569aea1 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -6,7 +6,9 @@ package utils import ( "context" + "embed" "fmt" + "github.com/onsi/gomega/format" "time" "github.com/onsi/ginkgo/v2" @@ -15,7 +17,11 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/klog/v2" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -23,11 +29,18 @@ import ( "go.goms.io/fleet/test/e2e/framework" ) +const ( + conditionTypeApplied = "Applied" +) + var ( // PollInterval defines the interval time for a poll operation. PollInterval = 5 * time.Second // PollTimeout defines the time after which the poll operation times out. - PollTimeout = 60 * time.Second + PollTimeout = 90 * time.Second + + //go:embed manifests + TestManifestFiles embed.FS ) // NewMemberCluster return a new member cluster. @@ -82,7 +95,7 @@ func NewNamespace(name string) *corev1.Namespace { func CreateMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) { ginkgo.By(fmt.Sprintf("Creating MemberCluster(%s)", mc.Name), func() { err := cluster.KubeClient.Create(context.TODO(), mc) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) klog.Infof("Waiting for MemberCluster(%s) to be synced", mc.Name) gomega.Eventually(func() error { @@ -94,17 +107,17 @@ func CreateMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) // UpdateMemberClusterState updates MemberCluster in the hub cluster. 
func UpdateMemberClusterState(cluster framework.Cluster, mc *v1alpha1.MemberCluster, state v1alpha1.ClusterState) { err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: mc.Name, Namespace: ""}, mc) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) mc.Spec.State = state err = cluster.KubeClient.Update(context.TODO(), mc) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) } // DeleteMemberCluster deletes MemberCluster in the hub cluster. func DeleteMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) { ginkgo.By(fmt.Sprintf("Deleting MemberCluster(%s)", mc.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), mc) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } @@ -134,7 +147,9 @@ func WaitConditionInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1 klog.Infof("Waiting for InternalMemberCluster(%s) condition(%s) status(%s) to be synced in the %s cluster", imc.Name, conditionType, status, cluster.ClusterName) gomega.Eventually(func() bool { err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if err != nil { + return false + } cond := imc.GetConditionWithType(v1alpha1.MemberAgent, string(conditionType)) return cond != nil && cond.Status == status }, customTimeout, PollInterval).Should(gomega.Equal(true)) @@ -144,7 +159,7 @@ func WaitConditionInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1 func CreateClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) { ginkgo.By(fmt.Sprintf("Creating ClusterRole (%s)", cr.Name), func() { err := cluster.KubeClient.Create(context.TODO(), cr) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } @@ -161,7 +176,7 @@ func WaitClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) { func DeleteClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) { ginkgo.By(fmt.Sprintf("Deleting ClusterRole(%s)", cr.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), cr) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } @@ -169,7 +184,7 @@ func DeleteClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) { func CreateClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) { ginkgo.By(fmt.Sprintf("Creating ClusterResourcePlacement(%s)", crp.Name), func() { err := cluster.KubeClient.Create(context.TODO(), crp) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) klog.Infof("Waiting for ClusterResourcePlacement(%s) to be synced", crp.Name) gomega.Eventually(func() error { @@ -194,7 +209,7 @@ func WaitConditionClusterResourcePlacement(cluster framework.Cluster, crp *v1alp func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) { ginkgo.By(fmt.Sprintf("Deleting ClusterResourcePlacement(%s)", crp.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), crp) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } @@ -226,7 +241,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) { ginkgo.By(fmt.Sprintf("Deleting Namespace(%s)", ns.Name), func() { err := 
cluster.KubeClient.Delete(context.TODO(), ns) if err != nil && !apierrors.IsNotFound(err) { - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) } }) } @@ -235,7 +250,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) { func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) { ginkgo.By(fmt.Sprintf("Creating ServiceAccount(%s)", sa.Name), func() { err := cluster.KubeClient.Create(context.TODO(), sa) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } @@ -243,6 +258,87 @@ func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) { ginkgo.By(fmt.Sprintf("Delete ServiceAccount(%s)", sa.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), sa) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed()) }) } + +// CreateWork creates Work object based on manifest given. +func CreateWork(workName string, workNamespace string, ctx context.Context, hubCluster framework.Cluster, workList []workapi.Work, manifests []workapi.Manifest) { + ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) + work := workapi.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: workNamespace, + }, + Spec: workapi.WorkSpec{ + Workload: workapi.WorkloadTemplate{ + Manifests: manifests, + }, + }, + } + + workList = append(workList, work) + err := hubCluster.KubeClient.Create(ctx, &work) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) +} + +func DeleteWork(hubCluster framework.Cluster, workList []workapi.Work, ctx context.Context) error { + if len(workList) > 0 { + for _, work := range workList { + err := hubCluster.KubeClient.Delete(ctx, &work) + return err + } + } + + return nil +} + +func AppliedWorkContainsResource(resourceMeta workapi.AppliedResourceMeta, name string, version string, kind string) bool { + if resourceMeta.Name != name || resourceMeta.Version != version || resourceMeta.Kind != kind { + return false + } + return true +} + +func GenerateCRDObjectFromFile(filepath string, genericCodec runtime.Decoder, cluster framework.Cluster) (*schema.GroupVersionKind, runtime.RawExtension) { + fileRaw, err := TestManifestFiles.ReadFile(filepath) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Reading manifest file %s failed", filepath) + + obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Decoding manifest file %s failed", filepath) + + jsonObj, err := json.Marshal(obj) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Marshalling failed for file %s", filepath) + + newObj := &unstructured.Unstructured{} + err = newObj.UnmarshalJSON(jsonObj) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "UnMarshaling failed for file %s", filepath) + + _, err = cluster.RestMapper.RESTMapping(newObj.GroupVersionKind().GroupKind(), newObj.GroupVersionKind().Version) + gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "CRD data was not mapped in the restMapper") + + return gvk, runtime.RawExtension{Object: obj, Raw: jsonObj} +} + +// AlreadyExistMatcher matches the error to be already exist +type AlreadyExistMatcher struct { +} + +// Match matches error. 
+func (matcher AlreadyExistMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, nil + } + actualError := actual.(error) + return apierrors.IsAlreadyExists(actualError), nil +} + +// FailureMessage builds an error message. +func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be already exist") +} + +// NegatedFailureMessage builds an error message. +func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be already exist") +} diff --git a/test/e2e/manifests/test-configmap.yaml b/test/e2e/utils/manifests/test-configmap.yaml similarity index 88% rename from test/e2e/manifests/test-configmap.yaml rename to test/e2e/utils/manifests/test-configmap.yaml index 675af6a86..ba3b7b987 100644 --- a/test/e2e/manifests/test-configmap.yaml +++ b/test/e2e/utils/manifests/test-configmap.yaml @@ -6,4 +6,4 @@ metadata: data: fielda: one fieldb: two - fieldc: three + fieldc: three \ No newline at end of file diff --git a/test/e2e/manifests/test-configmap.ns.yaml b/test/e2e/utils/manifests/test-configmap2.ns.yaml similarity index 66% rename from test/e2e/manifests/test-configmap.ns.yaml rename to test/e2e/utils/manifests/test-configmap2.ns.yaml index 780a1b358..eeac617da 100644 --- a/test/e2e/manifests/test-configmap.ns.yaml +++ b/test/e2e/utils/manifests/test-configmap2.ns.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: test-configmap + name: test-configmap2 namespace: test-namespace data: - fielda: one + fielda: one \ No newline at end of file diff --git a/test/e2e/utils/manifests/test-configmap2.yaml b/test/e2e/utils/manifests/test-configmap2.yaml new file mode 100644 index 000000000..2c43bcd62 --- /dev/null +++ b/test/e2e/utils/manifests/test-configmap2.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-configmap2 + namespace: default +data: + fielda: one + fieldb: two + fieldc: three \ No newline at end of file diff --git a/test/e2e/manifests/test-crd.yaml b/test/e2e/utils/manifests/test-crd.yaml similarity index 100% rename from test/e2e/manifests/test-crd.yaml rename to test/e2e/utils/manifests/test-crd.yaml diff --git a/test/e2e/manifests/test-deployment.yaml b/test/e2e/utils/manifests/test-deployment.yaml similarity index 100% rename from test/e2e/manifests/test-deployment.yaml rename to test/e2e/utils/manifests/test-deployment.yaml diff --git a/test/e2e/manifests/test-namespace.yaml b/test/e2e/utils/manifests/test-namespace.yaml similarity index 64% rename from test/e2e/manifests/test-namespace.yaml rename to test/e2e/utils/manifests/test-namespace.yaml index f8db044d1..10dfe3552 100644 --- a/test/e2e/manifests/test-namespace.yaml +++ b/test/e2e/utils/manifests/test-namespace.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: test-namespace + name: test-namespace \ No newline at end of file diff --git a/test/e2e/utils/manifests/test-secret.yaml b/test/e2e/utils/manifests/test-secret.yaml new file mode 100644 index 000000000..fa2d65dd1 --- /dev/null +++ b/test/e2e/utils/manifests/test-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: test-secret + namespace: default +data: + somekey: dGVzdA== +type: generic \ No newline at end of file diff --git a/test/e2e/manifests/test-service.yaml b/test/e2e/utils/manifests/test-service.yaml similarity index 100% rename from test/e2e/manifests/test-service.yaml rename to 
test/e2e/utils/manifests/test-service.yaml diff --git a/test/e2e/manifests/test-serviceaccount.yaml b/test/e2e/utils/manifests/test-serviceaccount.yaml similarity index 100% rename from test/e2e/manifests/test-serviceaccount.yaml rename to test/e2e/utils/manifests/test-serviceaccount.yaml diff --git a/test/e2e/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go similarity index 64% rename from test/e2e/work_api_test_utils.go rename to test/e2e/utils/work_api_test_utils.go index c15cac3e2..0a73f7750 100644 --- a/test/e2e/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -3,12 +3,13 @@ Copyright (c) Microsoft Corporation. Licensed under the MIT license. */ -package e2e +package utils import ( "context" - "embed" - + "fmt" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -19,19 +20,14 @@ import ( "go.goms.io/fleet/test/e2e/framework" ) -var ( - //go:embed manifests - testManifestFiles embed.FS -) - -type manifestDetails struct { +type ManifestDetails struct { Manifest workapi.Manifest GVK *schema.GroupVersionKind GVR *schema.GroupVersionResource ObjMeta metav1.ObjectMeta } -func createWorkObj(workName string, workNamespace string, manifestDetails []manifestDetails) *workapi.Work { +func CreateWorkObj(workName string, workNamespace string, manifestDetails []ManifestDetails) *workapi.Work { work := &workapi.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, @@ -46,22 +42,22 @@ func createWorkObj(workName string, workNamespace string, manifestDetails []mani return work } -func createWork(work *workapi.Work, hubCluster *framework.Cluster) error { +func CreateWorkOld(work *workapi.Work, hubCluster *framework.Cluster) error { return hubCluster.KubeClient.Create(context.Background(), work) } -func decodeUnstructured(manifest workapi.Manifest) (*unstructured.Unstructured, error) { +func DecodeUnstructured(manifest workapi.Manifest) (*unstructured.Unstructured, error) { unstructuredObj := &unstructured.Unstructured{} err := unstructuredObj.UnmarshalJSON(manifest.Raw) return unstructuredObj, err } -func deleteWorkResource(work *workapi.Work, hubCluster *framework.Cluster) error { +func DeleteWorkResource(work *workapi.Work, hubCluster *framework.Cluster) error { return hubCluster.KubeClient.Delete(context.Background(), work) } -func retrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluster) (*workapi.AppliedWork, error) { +func RetrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluster) (*workapi.AppliedWork, error) { retrievedAppliedWork := workapi.AppliedWork{} err := memberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: appliedWorkName}, &retrievedAppliedWork) if err != nil { @@ -71,7 +67,7 @@ func retrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluste return &retrievedAppliedWork, nil } -func retrieveWork(workNamespace string, workName string, hubCluster *framework.Cluster) (*workapi.Work, error) { +func RetrieveWork(workNamespace string, workName string, hubCluster *framework.Cluster) (*workapi.Work, error) { workRetrieved := workapi.Work{} err := hubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Namespace: workNamespace, Name: workName}, &workRetrieved) if err != nil { @@ -82,19 +78,27 @@ func retrieveWork(workNamespace string, workName string, hubCluster *framework.C return &workRetrieved, nil } -func updateWork(work 
*workapi.Work, hubCluster *framework.Cluster) (*workapi.Work, error) { +func UpdateWork(work *workapi.Work, hubCluster *framework.Cluster) (*workapi.Work, error) { err := hubCluster.KubeClient.Update(context.Background(), work) if err != nil { return nil, err } - updatedWork, err := retrieveWork(work.Namespace, work.Name, hubCluster) + updatedWork, err := RetrieveWork(work.Namespace, work.Name, hubCluster) if err != nil { return nil, err } return updatedWork, err } -func getWorkName(length int) string { +func WaitAppliedWorkPresent(workName string, memberCluster *framework.Cluster) { + ginkgo.By(fmt.Sprintf("Waiting for AppliedWork to be created with Name %s on memberCluster %s", workName, memberCluster.ClusterName)) + gomega.Eventually(func() error { + _, err := RetrieveAppliedWork(workName, memberCluster) + return err + }, PollTimeout, PollInterval).Should(gomega.BeNil()) +} + +func GetWorkName(length int) string { return "work" + rand.String(length) } diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go new file mode 100644 index 000000000..927d96291 --- /dev/null +++ b/test/e2e/work_api_e2e_test.go @@ -0,0 +1,254 @@ +package e2e + +import ( + "context" + "fmt" + "k8s.io/apimachinery/pkg/api/meta" + "reflect" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "go.goms.io/fleet/pkg/utils" + testutils "go.goms.io/fleet/test/e2e/utils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" +) + +// TODO: when join/leave logic is connected to work-api, join the Hub and Member for this test. +var _ = Describe("Work API Controller test", func() { + + const ( + conditionTypeApplied = "Applied" + ) + + var ( + ctx context.Context + workList []workapi.Work + ) + + BeforeEach(func() { + ctx = context.Background() + }) + + AfterEach(func() { + if len(workList) > 0 { + err := testutils.DeleteWork(*HubCluster, workList, ctx) + Expect(err).Should(Succeed(), "Deletion of work failed.") + } + }) + + It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { + workName := utils.RandStr() + + // Configmap will be included in this work object. 
+ manifestConfigMapName := "work-configmap" + manifestConfigMap := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: manifestConfigMapName, + Namespace: workResourceNs.Name, + }, + Data: map[string]string{ + "test-key": "test-data", + }, + } + By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, manifestConfigMapName)) + testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + { + RawExtension: runtime.RawExtension{Object: &manifestConfigMap}, + }, + }) + + By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) + Eventually(func() error { + return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) + + By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) + Eventually(func() bool { + work := workapi.Work{} + err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + if err != nil { + return false + } + + return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) + Eventually(func() bool { + appliedWork := workapi.AppliedWork{} + err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + if err != nil { + return false + } + for _, status := range appliedWork.Status.AppliedResources { + if testutils.AppliedWorkContainsResource(status, manifestConfigMap.Name, manifestConfigMap.APIVersion, manifestConfigMap.Kind) { + return true + } + } + return false + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) + Eventually(func() bool { + cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace). + Get(context.Background(), manifestConfigMap.Name, metav1.GetOptions{}) + if err != nil { + return false + } + + return reflect.DeepEqual(cm.Data, manifestConfigMap.Data) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("Upon successful work creation of multiple resources, all manifests are applied and resources are created", func() { + workName := utils.RandStr() + + // Secret will be included in this work object. 
+ manifestSecretName := "work-secret" + manifestSecret := corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: manifestSecretName, + Namespace: workResourceNs.Name, + }, + Data: map[string][]byte{"secretData": []byte("testByte")}, + } + + manifestServiceName := "work-service" + manifestService := corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: manifestServiceName, + Namespace: workResourceNs.Name, + Labels: map[string]string{}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 80, Protocol: corev1.ProtocolTCP}}, + Selector: map[string]string{"run": "test-nginx"}, + }, + } + + By(fmt.Sprintf("creating work %s/%s of %s and %s", workName, workNs.Name, manifestSecretName, manifestServiceName)) + testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + { + RawExtension: runtime.RawExtension{Object: &manifestSecret}, + }, + { + RawExtension: runtime.RawExtension{Object: &manifestService}, + }, + }) + + // Wait for the applied works to be created on the member cluster + Eventually(func() error { + return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) + + By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) + Eventually(func() bool { + work := workapi.Work{} + err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + if err != nil { + return false + } + + return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s and %s", manifestSecretName, manifestServiceName)) + Eventually(func() bool { + appliedWork := workapi.AppliedWork{} + err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + if err != nil { + return false + } + secretExists := false + podExists := false + for _, appliedResources := range appliedWork.Status.AppliedResources { + if testutils.AppliedWorkContainsResource(appliedResources, manifestSecret.Name, manifestSecret.APIVersion, manifestSecret.Kind) { + secretExists = true + } + if testutils.AppliedWorkContainsResource(appliedResources, manifestService.Name, manifestService.APIVersion, manifestService.Kind) { + podExists = true + } + } + + return secretExists && podExists + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("Resource %s and %s should have been created in cluster %s", manifestSecretName, manifestServiceName, MemberCluster.ClusterName)) + Eventually(func() bool { + _, secretErr := MemberCluster.KubeClientSet.CoreV1().Secrets(workResourceNs.Name).Get(context.Background(), manifestSecret.Name, metav1.GetOptions{}) + + _, podErr := MemberCluster.KubeClientSet.CoreV1().Services(workResourceNs.Name).Get(context.Background(), manifestService.Name, metav1.GetOptions{}) + + return secretErr == nil && podErr == nil + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }) + + It("Upon successful work creation of a CRD resource, manifest is applied, and resources are created", func() { + workName := utils.RandStr() + gvk, manifestCRD := 
testutils.GenerateCRDObjectFromFile("manifests/test-crd.yaml", genericCodec, *MemberCluster) + _, err := MemberCluster.RestMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + Expect(err).Should(Succeed(), "The Test CRD was not included in the RestMapper for Cluster %s", MemberCluster.ClusterName) + + By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, gvk.Kind)) + testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + { + RawExtension: manifestCRD, + }, + }) + + // Wait for the applied works to be created on the member cluster + Eventually(func() error { + return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) + + By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) + Eventually(func() bool { + work := workapi.Work{} + err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + if err != nil { + return false + } + + return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", gvk.Kind)) + Eventually(func() bool { + appliedWork := workapi.AppliedWork{} + err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + if err != nil { + return false + } + for _, status := range appliedWork.Status.AppliedResources { + if testutils.AppliedWorkContainsResource(status, "testcrds.multicluster.x-k8s.io", gvk.Version, gvk.Kind) { + return true + } + } + + return false + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + + By(fmt.Sprintf("Resource %s should have been created in cluster %s", "testcrds.multicluster.x-k8s.io", MemberCluster.ClusterName)) + Eventually(func() error { + _, err := MemberCluster.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "testcrds.multicluster.x-k8s.io", metav1.GetOptions{}) + + return err + }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) + }) +}) diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index 2641ccff6..538432774 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -7,8 +7,7 @@ package e2e import ( "context" - "fmt" - "time" + "go.goms.io/fleet/test/e2e/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -24,33 +23,19 @@ import ( workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" - fleetutil "go.goms.io/fleet/pkg/utils" ) const ( - eventuallyTimeout = 10 * time.Second - eventuallyInterval = 500 * time.Millisecond + eventuallyTimeout = 90 // seconds + eventuallyInterval = 1 // seconds ) -var defaultWorkNamespace = fmt.Sprintf(fleetutil.NamespaceNameFormat, MemberCluster.ClusterName) - -var _ = Describe("work-api testing", Ordered, func() { - - wns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: defaultWorkNamespace, - }, - } - - BeforeAll(func() { - _, err := HubCluster.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), wns, metav1.CreateOptions{}) - Expect(err).Should(SatisfyAny(Succeed(), &fleetutil.AlreadyExistMatcher{})) - }) +var _ = Describe("work-api testing", func() { Context("with a Work resource that has two manifests: Deployment & Service", func() { var createdWork *workapi.Work var err error - var mDetails []manifestDetails + var mDetails []utils.ManifestDetails BeforeEach(func() { mDetails = generateManifestDetails([]string{ @@ -58,28 +43,28 @@ var _ = Describe("work-api testing", Ordered, func() { "manifests/test-service.yaml", }) - workObj := createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workObj := utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, mDetails, ) - err = createWork(workObj, HubCluster) + err = utils.CreateWorkOld(workObj, HubCluster) Expect(err).ToNot(HaveOccurred()) - createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster) + createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { - err = deleteWorkResource(createdWork, HubCluster) + err = utils.DeleteWorkResource(createdWork, HubCluster) Expect(err).ToNot(HaveOccurred()) }) It("should have created: a respective AppliedWork, and the resources specified in the Work's manifests", func() { By("verifying an AppliedWork was created") Eventually(func() error { - _, err := retrieveAppliedWork(createdWork.Name, MemberCluster) + _, err := utils.RetrieveAppliedWork(createdWork.Name, MemberCluster) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -102,7 +87,7 @@ var _ = Describe("work-api testing", Ordered, func() { By("verifying that corresponding conditions were created") Eventually(func() bool { - work, err := retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) + work, err := utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) if err != nil { return false } @@ -116,8 +101,8 @@ var _ = Describe("work-api testing", Ordered, func() { var workOne *workapi.Work var workTwo *workapi.Work var err error - var manifestDetailsOne []manifestDetails - var manifestDetailsTwo []manifestDetails + var manifestDetailsOne []utils.ManifestDetails + var manifestDetailsTwo []utils.ManifestDetails BeforeEach(func() { manifestDetailsOne = generateManifestDetails([]string{ @@ -127,35 +112,38 @@ var _ = Describe("work-api testing", Ordered, func() { "manifests/test-deployment.yaml", }) - workOne = createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workOne = utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, manifestDetailsOne, ) - workTwo = createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workTwo = utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, manifestDetailsTwo) }) It("should apply both the works with duplicated 
manifest", func() { By("creating the work resources") - err = createWork(workOne, HubCluster) + err = utils.CreateWorkOld(workOne, HubCluster) Expect(err).ToNot(HaveOccurred()) - err = createWork(workTwo, HubCluster) + err = utils.CreateWorkOld(workTwo, HubCluster) Expect(err).ToNot(HaveOccurred()) + utils.WaitAppliedWorkPresent(workOne.Name, MemberCluster) + utils.WaitAppliedWorkPresent(workTwo.Name, MemberCluster) + By("Checking the Applied Work status of each to see both are applied.") Eventually(func() bool { - appliedWorkOne, err := retrieveAppliedWork(workOne.Name, MemberCluster) + appliedWorkOne, err := utils.RetrieveAppliedWork(workOne.Name, MemberCluster) if err != nil { return false } - appliedWorkTwo, err := retrieveAppliedWork(workTwo.Name, MemberCluster) + appliedWorkTwo, err := utils.RetrieveAppliedWork(workTwo.Name, MemberCluster) if err != nil { return false } @@ -165,11 +153,11 @@ var _ = Describe("work-api testing", Ordered, func() { By("Checking the work status of each works for verification") Eventually(func() bool { - workOne, err := retrieveWork(workOne.Namespace, workOne.Name, HubCluster) + workOne, err := utils.RetrieveWork(workOne.Namespace, workOne.Name, HubCluster) if err != nil { return false } - workTwo, err := retrieveWork(workTwo.Namespace, workTwo.Name, HubCluster) + workTwo, err := utils.RetrieveWork(workTwo.Namespace, workTwo.Name, HubCluster) if err != nil { return false } @@ -184,12 +172,20 @@ var _ = Describe("work-api testing", Ordered, func() { err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ Name: manifestDetailsOne[0].ObjMeta.Name, Namespace: manifestDetailsOne[0].ObjMeta.Namespace}, &deploy) - Expect(err).Should(Succeed()) + if err != nil { + return 0 + } + err = MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ + Name: manifestDetailsTwo[0].ObjMeta.Name, + Namespace: manifestDetailsTwo[0].ObjMeta.Namespace}, &deploy) + if err != nil { + return 0 + } return len(deploy.OwnerReferences) }, eventuallyTimeout, eventuallyInterval).Should(Equal(2)) By("delete the work two resources") - Expect(deleteWorkResource(workTwo, HubCluster)).To(Succeed()) + Expect(utils.DeleteWorkResource(workTwo, HubCluster)).To(Succeed()) By("Delete one work wont' delete the manifest") Eventually(func() int { @@ -201,7 +197,7 @@ var _ = Describe("work-api testing", Ordered, func() { }, eventuallyTimeout, eventuallyInterval).Should(Equal(1)) By("delete the work one resources") - err = deleteWorkResource(workOne, HubCluster) + err = utils.DeleteWorkResource(workOne, HubCluster) Expect(err).ToNot(HaveOccurred()) Eventually(func() bool { err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ @@ -215,33 +211,33 @@ var _ = Describe("work-api testing", Ordered, func() { Context("updating work with two newly added manifests: configmap & namespace", func() { var createdWork *workapi.Work var err error - var initialManifestDetails []manifestDetails - var addedManifestDetails []manifestDetails + var initialManifestDetails []utils.ManifestDetails + var addedManifestDetails []utils.ManifestDetails BeforeEach(func() { initialManifestDetails = generateManifestDetails([]string{ "manifests/test-secret.yaml", }) addedManifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap.ns.yaml", + "manifests/test-configmap2.ns.yaml", "manifests/test-namespace.yaml", }) - workObj := createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workObj := utils.CreateWorkObj( + utils.GetWorkName(5), + 
workNs.Name, initialManifestDetails, ) - err = createWork(workObj, HubCluster) + err = utils.CreateWorkOld(workObj, HubCluster) Expect(err).ToNot(HaveOccurred()) - createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster) + createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { - err = deleteWorkResource(createdWork, HubCluster) + err = utils.DeleteWorkResource(createdWork, HubCluster) Expect(err).ToNot(HaveOccurred()) err = MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Delete(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.DeleteOptions{}) @@ -250,12 +246,19 @@ var _ = Describe("work-api testing", Ordered, func() { It("should have created the ConfigMap in the new namespace", func() { By("retrieving the existing work and updating it by adding new manifests") + work := &workapi.Work{} Eventually(func() error { - createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) + work, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) + return err + }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) + + work.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) + work, err = utils.UpdateWork(createdWork, HubCluster) + Expect(err).Should(Succeed()) - createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) - createdWork, err = updateWork(createdWork, HubCluster) + By("checking if the new Namespace was created") + Eventually(func() error { + _, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{}) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -267,12 +270,6 @@ var _ = Describe("work-api testing", Ordered, func() { return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - By("checking if the new Namespace was created ") - Eventually(func() error { - _, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) }) }) @@ -280,39 +277,39 @@ var _ = Describe("work-api testing", Ordered, func() { var configMap corev1.ConfigMap var createdWork *workapi.Work var err error - var manifestDetails []manifestDetails + var manifestDetails []utils.ManifestDetails var newDataKey string var newDataValue string BeforeEach(func() { manifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap.yaml", + "manifests/test-configmap2.yaml", }) - newDataKey = getWorkName(5) - newDataValue = getWorkName(5) + newDataKey = utils.GetWorkName(5) + newDataValue = utils.GetWorkName(5) - workObj := createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workObj := utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, manifestDetails, ) - err = createWork(workObj, HubCluster) + err = utils.CreateWorkOld(workObj, HubCluster) Expect(err).ToNot(HaveOccurred()) - createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster) + createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) Expect(err).ToNot(HaveOccurred()) }) 
AfterEach(func() { - err = deleteWorkResource(createdWork, HubCluster) + err = utils.DeleteWorkResource(createdWork, HubCluster) Expect(err).ToNot(HaveOccurred()) }) It("should reapply the manifest's updated spec on the spoke cluster", func() { By("retrieving the existing work and modifying the manifest") Eventually(func() error { - createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) + createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) // Extract and modify the ConfigMap by adding a new key value pair. err = json.Unmarshal(createdWork.Spec.Workload.Manifests[0].Raw, &configMap) @@ -321,7 +318,7 @@ var _ = Describe("work-api testing", Ordered, func() { obj, _, _ := genericCodec.Decode(rawUpdatedManifest, nil, nil) createdWork.Spec.Workload.Manifests[0].Object = obj createdWork.Spec.Workload.Manifests[0].Raw = rawUpdatedManifest - _, err = updateWork(createdWork, HubCluster) + _, err = utils.UpdateWork(createdWork, HubCluster) return err }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) @@ -337,8 +334,8 @@ var _ = Describe("work-api testing", Ordered, func() { var appliedWork *workapi.AppliedWork var createdWork *workapi.Work var err error - var originalManifestDetails []manifestDetails - var replacedManifestDetails []manifestDetails + var originalManifestDetails []utils.ManifestDetails + var replacedManifestDetails []utils.ManifestDetails resourcesStillExist := true BeforeEach(func() { @@ -346,44 +343,44 @@ var _ = Describe("work-api testing", Ordered, func() { "manifests/test-secret.yaml", }) replacedManifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap.yaml", + "manifests/test-configmap2.yaml", }) - workObj := createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workObj := utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, originalManifestDetails, ) - err = createWork(workObj, HubCluster) + err = utils.CreateWorkOld(workObj, HubCluster) Expect(err).ToNot(HaveOccurred()) - createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster) + createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { - err = deleteWorkResource(createdWork, HubCluster) + err = utils.DeleteWorkResource(createdWork, HubCluster) Expect(err).ToNot(HaveOccurred()) }) It("should have deleted the original Work's resources, and created new resources with the replaced manifests", func() { By("getting the respective AppliedWork") Eventually(func() int { - appliedWork, _ = retrieveAppliedWork(createdWork.Name, MemberCluster) + appliedWork, _ = utils.RetrieveAppliedWork(createdWork.Name, MemberCluster) return len(appliedWork.Status.AppliedResources) }, eventuallyTimeout, eventuallyInterval).Should(Equal(len(originalManifestDetails))) By("updating the Work resource with replaced manifests") Eventually(func() error { - createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) + createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) createdWork.Spec.Workload.Manifests = nil for _, mD := range replacedManifestDetails { createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, mD.Manifest) } - createdWork, err = updateWork(createdWork, HubCluster) + createdWork, err = utils.UpdateWork(createdWork, HubCluster) return err }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) @@ -422,23 +419,23 @@ var _ = 
Describe("work-api testing", Ordered, func() { Context("Work deletion", func() { var createdWork *workapi.Work var err error - var manifestDetails []manifestDetails + var manifestDetails []utils.ManifestDetails BeforeEach(func() { manifestDetails = generateManifestDetails([]string{ "manifests/test-secret.yaml", }) - workObj := createWorkObj( - getWorkName(5), - defaultWorkNamespace, + workObj := utils.CreateWorkObj( + utils.GetWorkName(5), + workNs.Name, manifestDetails, ) - err = createWork(workObj, HubCluster) + err = utils.CreateWorkOld(workObj, HubCluster) Expect(err).ToNot(HaveOccurred()) - createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster) + createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) Expect(err).ToNot(HaveOccurred()) }) @@ -451,7 +448,7 @@ var _ = Describe("work-api testing", Ordered, func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) By("deleting the Work resource") - err = deleteWorkResource(createdWork, HubCluster) + err = utils.DeleteWorkResource(createdWork, HubCluster) Expect(err).ToNot(HaveOccurred()) By("verifying the resource was garbage collected") @@ -462,21 +459,16 @@ var _ = Describe("work-api testing", Ordered, func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) }) }) - - AfterAll(func() { - err := HubCluster.KubeClient.Delete(context.Background(), wns) - Expect(err).ToNot(HaveOccurred()) - }) }) -func generateManifestDetails(manifestFiles []string) []manifestDetails { - details := make([]manifestDetails, 0, len(manifestFiles)) +func generateManifestDetails(manifestFiles []string) []utils.ManifestDetails { + details := make([]utils.ManifestDetails, 0, len(manifestFiles)) for _, file := range manifestFiles { - detail := manifestDetails{} + detail := utils.ManifestDetails{} // Read files, create manifest - fileRaw, err := testManifestFiles.ReadFile(file) + fileRaw, err := utils.TestManifestFiles.ReadFile(file) Expect(err).ToNot(HaveOccurred()) obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) @@ -491,7 +483,7 @@ func generateManifestDetails(manifestFiles []string) []manifestDetails { Raw: jsonObj}, } - unstructuredObj, err := decodeUnstructured(detail.Manifest) + unstructuredObj, err := utils.DecodeUnstructured(detail.Manifest) Expect(err).ShouldNot(HaveOccurred()) mapping, err := MemberCluster.RestMapper.RESTMapping(unstructuredObj.GroupVersionKind().GroupKind(), unstructuredObj.GroupVersionKind().Version) diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index a5d482d4d..b49792b47 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -25,17 +25,18 @@ import ( var _ = Describe("workload orchestration testing", func() { var mc *v1alpha1.MemberCluster var sa *corev1.ServiceAccount - var memberNS *corev1.Namespace + var memberNsName string var imc *v1alpha1.InternalMemberCluster var cr *rbacv1.ClusterRole var crp *v1alpha1.ClusterResourcePlacement BeforeEach(func() { - memberNS = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + memberNsName = fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName) + //memberNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) By("prepare resources in member cluster") // create testing NS in member cluster - testutils.CreateNamespace(*MemberCluster, memberNS) - sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNS.Name) + //testutils.CreateNamespace(*MemberCluster, 
memberNs) + sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNsName) testutils.CreateServiceAccount(*MemberCluster, sa) By("deploy member cluster in the hub cluster") @@ -43,7 +44,7 @@ var _ = Describe("workload orchestration testing", func() { testutils.CreateMemberCluster(*HubCluster, mc) By("check if internal member cluster created in the hub cluster") - imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNS.Name) + imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNsName) testutils.WaitInternalMemberCluster(*HubCluster, imc) By("check if internal member cluster condition is updated to Joined") @@ -54,15 +55,7 @@ var _ = Describe("workload orchestration testing", func() { AfterEach(func() { testutils.DeleteMemberCluster(*HubCluster, mc) - Eventually(func() bool { - err := HubCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS) - return apierrors.IsNotFound(err) - }, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true)) - testutils.DeleteNamespace(*MemberCluster, memberNS) - Eventually(func() bool { - err := MemberCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS) - return apierrors.IsNotFound(err) - }, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true)) + testutils.DeleteServiceAccount(*MemberCluster, sa) }) It("Apply CRP and check if work gets propagated", func() { @@ -104,7 +97,7 @@ var _ = Describe("workload orchestration testing", func() { testutils.CreateClusterResourcePlacement(*HubCluster, crp) By("check if work gets created for cluster resource placement") - testutils.WaitWork(*HubCluster, workName, memberNS.Name) + testutils.WaitWork(*HubCluster, workName, memberNs.Name) By("check if cluster resource placement is updated to Scheduled & Applied") testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, 3*testutils.PollTimeout) From 6b364141349d7b3f37ad8ade3522cb2ab81f2462 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Mon, 29 Aug 2022 19:00:08 -0700 Subject: [PATCH 02/36] edits from the comments --- test/e2e/utils/helper.go | 21 ++++++++-------- test/e2e/work_api_e2e_test.go | 45 +++++++++++++++++------------------ 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 68569aea1..1bb083c08 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -220,7 +220,7 @@ func WaitWork(cluster framework.Cluster, workName, workNamespace string) { gomega.Eventually(func() error { err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) return err - }, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred()) + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s/%s not synced", workName, workNamespace) } // CreateNamespace create namespace and waits for namespace to exist. @@ -233,7 +233,7 @@ func CreateNamespace(cluster framework.Cluster, ns *corev1.Namespace) { gomega.Eventually(func() error { err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: ns.Name, Namespace: ""}, ns) return err - }, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred()) + }, PollTimeout, PollInterval).Should(gomega.Succeed()) } // DeleteNamespace delete namespace. 
@@ -263,7 +263,7 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) } // CreateWork creates Work object based on manifest given. -func CreateWork(workName string, workNamespace string, ctx context.Context, hubCluster framework.Cluster, workList []workapi.Work, manifests []workapi.Manifest) { +func CreateWork(hubCluster framework.Cluster, workName string, workNamespace string, ctx context.Context, workList []workapi.Work, manifests []workapi.Manifest) { ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) work := workapi.Work{ ObjectMeta: metav1.ObjectMeta{ @@ -278,8 +278,7 @@ func CreateWork(workName string, workNamespace string, ctx context.Context, hubC } workList = append(workList, work) - err := hubCluster.KubeClient.Create(ctx, &work) - gomega.Expect(err).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) + gomega.Expect(hubCluster.KubeClient.Create(ctx, &work)).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) } func DeleteWork(hubCluster framework.Cluster, workList []workapi.Work, ctx context.Context) error { @@ -300,22 +299,22 @@ func AppliedWorkContainsResource(resourceMeta workapi.AppliedResourceMeta, name return true } -func GenerateCRDObjectFromFile(filepath string, genericCodec runtime.Decoder, cluster framework.Cluster) (*schema.GroupVersionKind, runtime.RawExtension) { +func GenerateCRDObjectFromFile(cluster framework.Cluster, filepath string, genericCodec runtime.Decoder) (*schema.GroupVersionKind, runtime.RawExtension) { fileRaw, err := TestManifestFiles.ReadFile(filepath) - gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Reading manifest file %s failed", filepath) + gomega.Expect(err).Should(gomega.Succeed(), "Reading manifest file %s failed", filepath) obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) - gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Decoding manifest file %s failed", filepath) + gomega.Expect(err).Should(gomega.Succeed(), "Decoding manifest file %s failed", filepath) jsonObj, err := json.Marshal(obj) - gomega.Expect(err).ToNot(gomega.HaveOccurred(), "Marshalling failed for file %s", filepath) + gomega.Expect(err).Should(gomega.Succeed(), "Marshalling failed for file %s", filepath) newObj := &unstructured.Unstructured{} err = newObj.UnmarshalJSON(jsonObj) - gomega.Expect(err).ToNot(gomega.HaveOccurred(), "UnMarshaling failed for file %s", filepath) + gomega.Expect(err).Should(gomega.Succeed(), "UnMarshaling failed for file %s", filepath) _, err = cluster.RestMapper.RESTMapping(newObj.GroupVersionKind().GroupKind(), newObj.GroupVersionKind().Version) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "CRD data was not mapped in the restMapper") + gomega.Expect(err).Should(gomega.Succeed(), "CRD data was not mapped in the restMapper") return gvk, runtime.RawExtension{Object: obj, Raw: jsonObj} } diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 927d96291..8e0d59083 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -35,8 +35,7 @@ var _ = Describe("Work API Controller test", func() { AfterEach(func() { if len(workList) > 0 { - err := testutils.DeleteWork(*HubCluster, workList, ctx) - Expect(err).Should(Succeed(), "Deletion of work failed.") + Expect(testutils.DeleteWork(*HubCluster, workList, ctx)).Should(Succeed(), "Deletion of work failed.") } }) @@ -59,7 +58,7 @@ var _ = Describe("Work API Controller test", func() { }, } 
By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, manifestConfigMapName)) - testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ { RawExtension: runtime.RawExtension{Object: &manifestConfigMap}, }, @@ -67,13 +66,13 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) Eventually(func() error { - return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) Eventually(func() bool { work := workapi.Work{} - err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) if err != nil { return false } @@ -84,7 +83,7 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) Eventually(func() bool { appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) if err != nil { return false } @@ -99,7 +98,7 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) Eventually(func() bool { cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace). 
- Get(context.Background(), manifestConfigMap.Name, metav1.GetOptions{}) + Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}) if err != nil { return false } @@ -143,7 +142,7 @@ var _ = Describe("Work API Controller test", func() { } By(fmt.Sprintf("creating work %s/%s of %s and %s", workName, workNs.Name, manifestSecretName, manifestServiceName)) - testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ { RawExtension: runtime.RawExtension{Object: &manifestSecret}, }, @@ -154,13 +153,13 @@ var _ = Describe("Work API Controller test", func() { // Wait for the applied works to be created on the member cluster Eventually(func() error { - return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) Eventually(func() bool { work := workapi.Work{} - err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) if err != nil { return false } @@ -171,7 +170,7 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s and %s", manifestSecretName, manifestServiceName)) Eventually(func() bool { appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) if err != nil { return false } @@ -191,9 +190,9 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Resource %s and %s should have been created in cluster %s", manifestSecretName, manifestServiceName, MemberCluster.ClusterName)) Eventually(func() bool { - _, secretErr := MemberCluster.KubeClientSet.CoreV1().Secrets(workResourceNs.Name).Get(context.Background(), manifestSecret.Name, metav1.GetOptions{}) + _, secretErr := MemberCluster.KubeClientSet.CoreV1().Secrets(workResourceNs.Name).Get(ctx, manifestSecret.Name, metav1.GetOptions{}) - _, podErr := MemberCluster.KubeClientSet.CoreV1().Services(workResourceNs.Name).Get(context.Background(), manifestService.Name, metav1.GetOptions{}) + _, podErr := MemberCluster.KubeClientSet.CoreV1().Services(workResourceNs.Name).Get(ctx, manifestService.Name, metav1.GetOptions{}) return secretErr == nil && podErr == nil }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) @@ -201,12 +200,12 @@ var _ = Describe("Work API Controller test", func() { It("Upon successful work creation of a CRD resource, manifest is applied, and resources are created", func() { workName := utils.RandStr() - gvk, manifestCRD := testutils.GenerateCRDObjectFromFile("manifests/test-crd.yaml", genericCodec, *MemberCluster) + gvk, manifestCRD := testutils.GenerateCRDObjectFromFile(*MemberCluster, "manifests/test-crd.yaml", genericCodec) _, err := MemberCluster.RestMapper.RESTMapping(gvk.GroupKind(), gvk.Version) Expect(err).Should(Succeed(), "The Test CRD was not included in the RestMapper for Cluster %s", MemberCluster.ClusterName) 
By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, gvk.Kind)) - testutils.CreateWork(workName, workNs.Name, ctx, *HubCluster, workList, []workapi.Manifest{ + testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ { RawExtension: manifestCRD, }, @@ -214,24 +213,24 @@ var _ = Describe("Work API Controller test", func() { // Wait for the applied works to be created on the member cluster Eventually(func() error { - return MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Waiting for AppliedWork %s failed", workName) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) Eventually(func() bool { work := workapi.Work{} - err := HubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) + err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) if err != nil { return false } return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue(), "Applied Condition not True for work %s", workName) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", gvk.Kind)) Eventually(func() bool { appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: workName}, &appliedWork) + err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) if err != nil { return false } @@ -242,13 +241,13 @@ var _ = Describe("Work API Controller test", func() { } return false - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue(), "AppliedWork %s does not portray correct resources created", workName) By(fmt.Sprintf("Resource %s should have been created in cluster %s", "testcrds.multicluster.x-k8s.io", MemberCluster.ClusterName)) Eventually(func() error { - _, err := MemberCluster.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "testcrds.multicluster.x-k8s.io", metav1.GetOptions{}) + _, err := MemberCluster.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, "testcrds.multicluster.x-k8s.io", metav1.GetOptions{}) return err - }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Resources %s not created in cluster %s", "testcrds.multicluster.x-k8s.io", MemberCluster.ClusterName) }) }) From ec33adaf4c22d9910ecb935f7d7d79099b672809 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 30 Aug 2022 22:56:44 -0700 Subject: [PATCH 03/36] Pushing changes based on comments --- test/e2e/e2e_test.go | 32 ++- test/e2e/framework/cluster.go | 14 +- test/e2e/join_leave_member_test.go | 8 +- test/e2e/utils/helper.go | 47 ++-- test/e2e/utils/manifests/test-configmap.yaml | 2 +- .../utils/manifests/test-configmap2.ns.yaml | 2 +- test/e2e/utils/manifests/test-configmap2.yaml | 2 +- test/e2e/utils/manifests/test-namespace.yaml | 2 +- 
test/e2e/utils/manifests/test-secret.yaml | 2 +- .../utils/manifests/test-serviceaccount.yaml | 2 +- test/e2e/work_api_e2e_test.go | 232 ++++-------------- test/e2e/work_api_test.go | 25 +- test/e2e/work_load_test.go | 12 +- 13 files changed, 116 insertions(+), 266 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 65337ac5b..f05080f44 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -6,13 +6,12 @@ package e2e import ( "fmt" - "go.goms.io/fleet/pkg/utils" - testutils "go.goms.io/fleet/test/e2e/utils" "os" "testing" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + testutils "go.goms.io/fleet/test/e2e/utils" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -21,6 +20,7 @@ import ( workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "go.goms.io/fleet/apis/v1alpha1" + "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/test/e2e/framework" ) @@ -31,9 +31,15 @@ var ( MemberCluster = framework.NewCluster(memberClusterName, scheme) hubURL string scheme = runtime.NewScheme() - memberNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) - workNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) - workResourceNs = testutils.NewNamespace("resource-namespace") + + // This namespace in HubCluster will store Member cluster-related CRs, such as v1alpha1.MemberCluster + memberNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + + // This namespace in HubCluster will store v1alpha1.Work to simulate Work-related features in Hub Cluster. + workNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + + // This namespace in MemberCluster will store resources created from the Work-api. 
+ workResourceNamespace = testutils.NewNamespace("resource-namespace") genericCodecs = serializer.NewCodecFactory(scheme) genericCodec = genericCodecs.UniversalDeserializer() @@ -53,10 +59,10 @@ func TestE2E(t *testing.T) { var _ = BeforeSuite(func() { kubeconfig := os.Getenv("KUBECONFIG") - Expect(kubeconfig).ShouldNot(BeEmpty()) + Expect(kubeconfig).ShouldNot(BeEmpty(), "Failure to retrieve kubeconfig") hubURL = os.Getenv("HUB_SERVER_URL") - Expect(hubURL).ShouldNot(BeEmpty()) + Expect(hubURL).ShouldNot(BeEmpty(), "Failure to retrieve Hub URL.") // hub setup HubCluster.HubURL = hubURL @@ -66,15 +72,15 @@ var _ = BeforeSuite(func() { MemberCluster.HubURL = hubURL framework.GetClusterClient(MemberCluster) - testutils.CreateNamespace(*MemberCluster, memberNs) + testutils.CreateNamespace(*MemberCluster, memberNamespace) - testutils.CreateNamespace(*HubCluster, workNs) - testutils.CreateNamespace(*MemberCluster, workResourceNs) + testutils.CreateNamespace(*HubCluster, workNamespace) + testutils.CreateNamespace(*MemberCluster, workResourceNamespace) }) var _ = AfterSuite(func() { - testutils.DeleteNamespace(*MemberCluster, memberNs) + testutils.DeleteNamespace(*MemberCluster, memberNamespace) - testutils.DeleteNamespace(*HubCluster, workNs) - testutils.DeleteNamespace(*MemberCluster, workResourceNs) + testutils.DeleteNamespace(*HubCluster, workNamespace) + testutils.DeleteNamespace(*MemberCluster, workResourceNamespace) }) diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index 9e0604623..a431a3f5b 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -7,7 +7,7 @@ package framework import ( "os" - "github.com/onsi/gomega" + . "github.com/onsi/gomega" // nolint:golint apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -47,23 +47,23 @@ func GetClusterClient(cluster *Cluster) { restConfig, err := clusterConfig.ClientConfig() if err != nil { - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred()) } cluster.KubeClient, err = client.New(restConfig, client.Options{Scheme: cluster.Scheme}) - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + Expect(err).ShouldNot(HaveOccurred()) cluster.KubeClientSet, err = kubernetes.NewForConfig(restConfig) - gomega.Expect(err).Should(gomega.Succeed()) + Expect(err).Should(Succeed()) cluster.APIExtensionClient, err = apiextension.NewForConfig(restConfig) - gomega.Expect(err).Should(gomega.Succeed()) + Expect(err).Should(Succeed()) cluster.DynamicClient, err = dynamic.NewForConfig(restConfig) - gomega.Expect(err).Should(gomega.Succeed()) + Expect(err).Should(Succeed()) cluster.RestMapper, err = apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) - gomega.Expect(err).Should(gomega.Succeed()) + Expect(err).Should(Succeed()) } func GetClientConfig(cluster *Cluster) clientcmd.ClientConfig { diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index 99d0fdb68..6562d332c 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -5,10 +5,8 @@ Licensed under the MIT license. package e2e import ( - "fmt" . 
"github.com/onsi/ginkgo/v2" "go.goms.io/fleet/apis/v1alpha1" - "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,12 +15,10 @@ import ( var _ = Describe("Join/leave member cluster testing", func() { var mc *v1alpha1.MemberCluster var sa *corev1.ServiceAccount - var memberNsName string var imc *v1alpha1.InternalMemberCluster BeforeEach(func() { - memberNsName = fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName) - sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNsName) + sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) testutils.CreateServiceAccount(*MemberCluster, sa) By("deploy member cluster in the hub cluster") @@ -30,7 +26,7 @@ var _ = Describe("Join/leave member cluster testing", func() { testutils.CreateMemberCluster(*HubCluster, mc) By("check if internal member cluster created in the hub cluster") - imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNsName) + imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name) testutils.WaitInternalMemberCluster(*HubCluster, imc) By("check if member cluster is marked as readyToJoin") diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 1bb083c08..38b1a496e 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -17,11 +17,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" "k8s.io/klog/v2" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -146,8 +143,7 @@ func WaitInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1.Internal func WaitConditionInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1.InternalMemberCluster, conditionType v1alpha1.AgentConditionType, status metav1.ConditionStatus, customTimeout time.Duration) { klog.Infof("Waiting for InternalMemberCluster(%s) condition(%s) status(%s) to be synced in the %s cluster", imc.Name, conditionType, status, cluster.ClusterName) gomega.Eventually(func() bool { - err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc) - if err != nil { + if err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc); err != nil { return false } cond := imc.GetConditionWithType(v1alpha1.MemberAgent, string(conditionType)) @@ -218,8 +214,7 @@ func WaitWork(cluster framework.Cluster, workName, workNamespace string) { var work workapi.Work klog.Infof("Waiting for Work(%s/%s) to be synced", workName, workNamespace) gomega.Eventually(func() error { - err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) - return err + return cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s/%s not synced", workName, workNamespace) } @@ -263,7 +258,7 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) } // CreateWork creates Work object based on manifest given. 
-func CreateWork(hubCluster framework.Cluster, workName string, workNamespace string, ctx context.Context, workList []workapi.Work, manifests []workapi.Manifest) { +func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName string, workNamespace string, workList []workapi.Work, manifests []workapi.Manifest) { ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) work := workapi.Work{ ObjectMeta: metav1.ObjectMeta{ @@ -281,10 +276,14 @@ func CreateWork(hubCluster framework.Cluster, workName string, workNamespace str gomega.Expect(hubCluster.KubeClient.Create(ctx, &work)).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) } -func DeleteWork(hubCluster framework.Cluster, workList []workapi.Work, ctx context.Context) error { +// DeleteWork deletes all works used in the current test. +func DeleteWork(ctx context.Context, hubCluster framework.Cluster, workList []workapi.Work) error { if len(workList) > 0 { for _, work := range workList { err := hubCluster.KubeClient.Delete(ctx, &work) + if apierrors.IsNotFound(err) { + continue + } return err } } @@ -292,31 +291,13 @@ func DeleteWork(hubCluster framework.Cluster, workList []workapi.Work, ctx conte return nil } -func AppliedWorkContainsResource(resourceMeta workapi.AppliedResourceMeta, name string, version string, kind string) bool { - if resourceMeta.Name != name || resourceMeta.Version != version || resourceMeta.Kind != kind { - return false +// AddManifests adds manifests to be included within a Work Ob +func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) { + for _, obj := range objects { + manifests = append(manifests, workapi.Manifest{ + RawExtension: runtime.RawExtension{Object: obj}, + }) } - return true -} - -func GenerateCRDObjectFromFile(cluster framework.Cluster, filepath string, genericCodec runtime.Decoder) (*schema.GroupVersionKind, runtime.RawExtension) { - fileRaw, err := TestManifestFiles.ReadFile(filepath) - gomega.Expect(err).Should(gomega.Succeed(), "Reading manifest file %s failed", filepath) - - obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) - gomega.Expect(err).Should(gomega.Succeed(), "Decoding manifest file %s failed", filepath) - - jsonObj, err := json.Marshal(obj) - gomega.Expect(err).Should(gomega.Succeed(), "Marshalling failed for file %s", filepath) - - newObj := &unstructured.Unstructured{} - err = newObj.UnmarshalJSON(jsonObj) - gomega.Expect(err).Should(gomega.Succeed(), "UnMarshaling failed for file %s", filepath) - - _, err = cluster.RestMapper.RESTMapping(newObj.GroupVersionKind().GroupKind(), newObj.GroupVersionKind().Version) - gomega.Expect(err).Should(gomega.Succeed(), "CRD data was not mapped in the restMapper") - - return gvk, runtime.RawExtension{Object: obj, Raw: jsonObj} } // AlreadyExistMatcher matches the error to be already exist diff --git a/test/e2e/utils/manifests/test-configmap.yaml b/test/e2e/utils/manifests/test-configmap.yaml index ba3b7b987..675af6a86 100644 --- a/test/e2e/utils/manifests/test-configmap.yaml +++ b/test/e2e/utils/manifests/test-configmap.yaml @@ -6,4 +6,4 @@ metadata: data: fielda: one fieldb: two - fieldc: three \ No newline at end of file + fieldc: three diff --git a/test/e2e/utils/manifests/test-configmap2.ns.yaml b/test/e2e/utils/manifests/test-configmap2.ns.yaml index eeac617da..f627a63b3 100644 --- a/test/e2e/utils/manifests/test-configmap2.ns.yaml +++ b/test/e2e/utils/manifests/test-configmap2.ns.yaml @@ -4,4 +4,4 @@ metadata: name: test-configmap2 
namespace: test-namespace data: - fielda: one \ No newline at end of file + fielda: one diff --git a/test/e2e/utils/manifests/test-configmap2.yaml b/test/e2e/utils/manifests/test-configmap2.yaml index 2c43bcd62..d3dc4884f 100644 --- a/test/e2e/utils/manifests/test-configmap2.yaml +++ b/test/e2e/utils/manifests/test-configmap2.yaml @@ -6,4 +6,4 @@ metadata: data: fielda: one fieldb: two - fieldc: three \ No newline at end of file + fieldc: three diff --git a/test/e2e/utils/manifests/test-namespace.yaml b/test/e2e/utils/manifests/test-namespace.yaml index 10dfe3552..f8db044d1 100644 --- a/test/e2e/utils/manifests/test-namespace.yaml +++ b/test/e2e/utils/manifests/test-namespace.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: test-namespace \ No newline at end of file + name: test-namespace diff --git a/test/e2e/utils/manifests/test-secret.yaml b/test/e2e/utils/manifests/test-secret.yaml index fa2d65dd1..c6450fef2 100644 --- a/test/e2e/utils/manifests/test-secret.yaml +++ b/test/e2e/utils/manifests/test-secret.yaml @@ -5,4 +5,4 @@ metadata: namespace: default data: somekey: dGVzdA== -type: generic \ No newline at end of file +type: generic diff --git a/test/e2e/utils/manifests/test-serviceaccount.yaml b/test/e2e/utils/manifests/test-serviceaccount.yaml index c2c6600fc..5b5b1f9fa 100644 --- a/test/e2e/utils/manifests/test-serviceaccount.yaml +++ b/test/e2e/utils/manifests/test-serviceaccount.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: test-serviceaccount - namespace: default \ No newline at end of file + namespace: default diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 8e0d59083..15a50a158 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -3,14 +3,13 @@ package e2e import ( "context" "fmt" - "k8s.io/apimachinery/pkg/api/meta" - "reflect" - + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -25,8 +24,18 @@ var _ = Describe("Work API Controller test", func() { ) var ( - ctx context.Context - workList []workapi.Work + ctx context.Context + + // Includes all works applied to the hub cluster. Used for garbage collection. + works []workapi.Work + + // Includes all manifests to be within a Work object. + manifests []workapi.Manifest + + // Comparison Options + cmpOptions = []cmp.Option{ + cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), + } ) BeforeEach(func() { @@ -34,13 +43,11 @@ var _ = Describe("Work API Controller test", func() { }) AfterEach(func() { - if len(workList) > 0 { - Expect(testutils.DeleteWork(*HubCluster, workList, ctx)).Should(Succeed(), "Deletion of work failed.") - } + Expect(testutils.DeleteWork(ctx, *HubCluster, works)).Should(Succeed(), "Deletion of work failed.") }) It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { - workName := utils.RandStr() + workName := testutils.GetWorkName(5) // Configmap will be included in this work object. 
manifestConfigMapName := "work-configmap" @@ -51,29 +58,26 @@ var _ = Describe("Work API Controller test", func() { }, ObjectMeta: metav1.ObjectMeta{ Name: manifestConfigMapName, - Namespace: workResourceNs.Name, + Namespace: workResourceNamespace.Name, }, Data: map[string]string{ "test-key": "test-data", }, } - By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, manifestConfigMapName)) - testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ - { - RawExtension: runtime.RawExtension{Object: &manifestConfigMap}, - }, - }) + + testutils.AddManifests([]runtime.Object{&manifestConfigMap}, manifests) + By(fmt.Sprintf("creating work %s/%s of %s", workName, workNamespace.Name, manifestConfigMapName)) + testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, works, manifests) By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) Eventually(func() error { return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) - By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) + By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) Eventually(func() bool { work := workapi.Work{} - err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) - if err != nil { + if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { return false } @@ -81,173 +85,37 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) - Eventually(func() bool { - appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) - if err != nil { - return false - } - for _, status := range appliedWork.Status.AppliedResources { - if testutils.AppliedWorkContainsResource(status, manifestConfigMap.Name, manifestConfigMap.APIVersion, manifestConfigMap.Kind) { - return true - } - } - return false - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) - - By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) - Eventually(func() bool { - cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace). - Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}) - if err != nil { - return false - } - - return reflect.DeepEqual(cm.Data, manifestConfigMap.Data) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) - }) - - It("Upon successful work creation of multiple resources, all manifests are applied and resources are created", func() { - workName := utils.RandStr() - - // Secret will be included in this work object. 
- manifestSecretName := "work-secret" - manifestSecret := corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: manifestSecretName, - Namespace: workResourceNs.Name, - }, - Data: map[string][]byte{"secretData": []byte("testByte")}, - } - - manifestServiceName := "work-service" - manifestService := corev1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: manifestServiceName, - Namespace: workResourceNs.Name, - Labels: map[string]string{}, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{{Port: 80, Protocol: corev1.ProtocolTCP}}, - Selector: map[string]string{"run": "test-nginx"}, - }, - } - - By(fmt.Sprintf("creating work %s/%s of %s and %s", workName, workNs.Name, manifestSecretName, manifestServiceName)) - testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ - { - RawExtension: runtime.RawExtension{Object: &manifestSecret}, - }, - { - RawExtension: runtime.RawExtension{Object: &manifestService}, - }, - }) - - // Wait for the applied works to be created on the member cluster - Eventually(func() error { - return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeNil()) - - By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) - Eventually(func() bool { - work := workapi.Work{} - err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) - if err != nil { - return false - } - - return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) - - By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s and %s", manifestSecretName, manifestServiceName)) - Eventually(func() bool { + Eventually(func() string { appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) - if err != nil { - return false + if err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork); err != nil { + return err.Error() } - secretExists := false - podExists := false - for _, appliedResources := range appliedWork.Status.AppliedResources { - if testutils.AppliedWorkContainsResource(appliedResources, manifestSecret.Name, manifestSecret.APIVersion, manifestSecret.Kind) { - secretExists = true - } - if testutils.AppliedWorkContainsResource(appliedResources, manifestService.Name, manifestService.APIVersion, manifestService.Kind) { - podExists = true - } + if len(appliedWork.Status.AppliedResources) == 0 { + return fmt.Sprintf("Applied Work Meta not created for resource %s", manifestConfigMapName) } - - return secretExists && podExists - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) - - By(fmt.Sprintf("Resource %s and %s should have been created in cluster %s", manifestSecretName, manifestServiceName, MemberCluster.ClusterName)) - Eventually(func() bool { - _, secretErr := MemberCluster.KubeClientSet.CoreV1().Secrets(workResourceNs.Name).Get(ctx, manifestSecret.Name, metav1.GetOptions{}) - - _, podErr := MemberCluster.KubeClientSet.CoreV1().Services(workResourceNs.Name).Get(ctx, manifestService.Name, metav1.GetOptions{}) - - return secretErr == nil && podErr == nil - }, testutils.PollTimeout, 
testutils.PollInterval).Should(BeTrue()) - }) - - It("Upon successful work creation of a CRD resource, manifest is applied, and resources are created", func() { - workName := utils.RandStr() - gvk, manifestCRD := testutils.GenerateCRDObjectFromFile(*MemberCluster, "manifests/test-crd.yaml", genericCodec) - _, err := MemberCluster.RestMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - Expect(err).Should(Succeed(), "The Test CRD was not included in the RestMapper for Cluster %s", MemberCluster.ClusterName) - - By(fmt.Sprintf("creating work %s/%s of %s", workName, workNs.Name, gvk.Kind)) - testutils.CreateWork(*HubCluster, workName, workNs.Name, ctx, workList, []workapi.Manifest{ - { - RawExtension: manifestCRD, - }, - }) - - // Wait for the applied works to be created on the member cluster - Eventually(func() error { - return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Waiting for AppliedWork %s failed", workName) - - By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNs.Name)) - Eventually(func() bool { - work := workapi.Work{} - err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNs.Name}, &work) - if err != nil { - return false + want := workapi.AppliedResourceMeta{ + ResourceIdentifier: workapi.ResourceIdentifier{ + Ordinal: 0, + Group: manifestConfigMap.GroupVersionKind().Group, + Version: manifestConfigMap.GroupVersionKind().Version, + Kind: manifestConfigMap.GroupVersionKind().Kind, + Namespace: manifestConfigMap.Namespace, + Name: manifestConfigMap.Name, + Resource: "configmap", + }, } + return cmp.Diff(want, appliedWork.Status.AppliedResources[0], cmpOptions...) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate AppliedResourceMeta mismatch (-want, +got)") - return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue(), "Applied Condition not True for work %s", workName) - - By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", gvk.Kind)) - Eventually(func() bool { - appliedWork := workapi.AppliedWork{} - err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork) - if err != nil { - return false - } - for _, status := range appliedWork.Status.AppliedResources { - if testutils.AppliedWorkContainsResource(status, "testcrds.multicluster.x-k8s.io", gvk.Version, gvk.Kind) { - return true - } + By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) + Eventually(func() string { + if cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace). 
+ Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}); err != nil { + return err.Error() + } else { + return cmp.Diff(cm.Data, manifestConfigMap.Data) } - - return false - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue(), "AppliedWork %s does not portray correct resources created", workName) - - By(fmt.Sprintf("Resource %s should have been created in cluster %s", "testcrds.multicluster.x-k8s.io", MemberCluster.ClusterName)) - Eventually(func() error { - _, err := MemberCluster.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, "testcrds.multicluster.x-k8s.io", metav1.GetOptions{}) - - return err - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Resources %s not created in cluster %s", "testcrds.multicluster.x-k8s.io", MemberCluster.ClusterName) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), + "ConfigMap %s was not created in the cluster %s", manifestConfigMapName, MemberCluster.ClusterName) }) }) diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index 538432774..06f924a63 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -45,7 +45,7 @@ var _ = Describe("work-api testing", func() { workObj := utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, mDetails, ) @@ -114,13 +114,13 @@ var _ = Describe("work-api testing", func() { workOne = utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, manifestDetailsOne, ) workTwo = utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, manifestDetailsTwo) }) @@ -225,7 +225,7 @@ var _ = Describe("work-api testing", func() { workObj := utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, initialManifestDetails, ) @@ -248,14 +248,15 @@ var _ = Describe("work-api testing", func() { By("retrieving the existing work and updating it by adding new manifests") work := &workapi.Work{} Eventually(func() error { - work, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) + if work, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster); err != nil { + return err + } + + work.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) + work, err = utils.UpdateWork(createdWork, HubCluster) return err }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) - work.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) - work, err = utils.UpdateWork(createdWork, HubCluster) - Expect(err).Should(Succeed()) - By("checking if the new Namespace was created") Eventually(func() error { _, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{}) @@ -290,7 +291,7 @@ var _ = Describe("work-api testing", func() { workObj := utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, manifestDetails, ) @@ -348,7 +349,7 @@ var _ = Describe("work-api testing", func() { workObj := utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, originalManifestDetails, ) @@ -428,7 +429,7 @@ var _ = Describe("work-api testing", func() { workObj := utils.CreateWorkObj( utils.GetWorkName(5), - workNs.Name, + workNamespace.Name, manifestDetails, ) diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index 
b49792b47..d5b96d6ca 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -25,18 +25,16 @@ import ( var _ = Describe("workload orchestration testing", func() { var mc *v1alpha1.MemberCluster var sa *corev1.ServiceAccount - var memberNsName string var imc *v1alpha1.InternalMemberCluster var cr *rbacv1.ClusterRole var crp *v1alpha1.ClusterResourcePlacement BeforeEach(func() { - memberNsName = fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName) - //memberNs = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) + //memberNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) By("prepare resources in member cluster") // create testing NS in member cluster - //testutils.CreateNamespace(*MemberCluster, memberNs) - sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNsName) + //testutils.CreateNamespace(*MemberCluster, memberNamespace) + sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) testutils.CreateServiceAccount(*MemberCluster, sa) By("deploy member cluster in the hub cluster") @@ -44,7 +42,7 @@ var _ = Describe("workload orchestration testing", func() { testutils.CreateMemberCluster(*HubCluster, mc) By("check if internal member cluster created in the hub cluster") - imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNsName) + imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name) testutils.WaitInternalMemberCluster(*HubCluster, imc) By("check if internal member cluster condition is updated to Joined") @@ -97,7 +95,7 @@ var _ = Describe("workload orchestration testing", func() { testutils.CreateClusterResourcePlacement(*HubCluster, crp) By("check if work gets created for cluster resource placement") - testutils.WaitWork(*HubCluster, workName, memberNs.Name) + testutils.WaitWork(*HubCluster, workName, memberNamespace.Name) By("check if cluster resource placement is updated to Scheduled & Applied") testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, 3*testutils.PollTimeout) From 8ec22a67c7d55927591b1c1f84a1e167aef8b8bb Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 31 Aug 2022 16:06:05 -0700 Subject: [PATCH 04/36] lint fix --- test/e2e/utils/helper.go | 4 ---- test/e2e/work_load_test.go | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 38b1a496e..1f0ae9302 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -26,10 +26,6 @@ import ( "go.goms.io/fleet/test/e2e/framework" ) -const ( - conditionTypeApplied = "Applied" -) - var ( // PollInterval defines the interval time for a poll operation. PollInterval = 5 * time.Second diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index d5b96d6ca..60d4f655e 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -8,6 +8,7 @@ package e2e import ( "context" "fmt" + "go.goms.io/fleet/pkg/utils" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -18,7 +19,6 @@ import ( "k8s.io/apimachinery/pkg/types" "go.goms.io/fleet/apis/v1alpha1" - "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" ) @@ -30,10 +30,8 @@ var _ = Describe("workload orchestration testing", func() { var crp *v1alpha1.ClusterResourcePlacement BeforeEach(func() { - //memberNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) By("prepare resources in member cluster") // create testing NS in member cluster - //testutils.CreateNamespace(*MemberCluster, memberNamespace) sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) testutils.CreateServiceAccount(*MemberCluster, sa) From 0cd18fa5420f3b429ce5b344a533f5de67ee252a Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 31 Aug 2022 16:58:04 -0700 Subject: [PATCH 05/36] Removing one of the namespaces for duplicate namespace error --- test/e2e/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index f05080f44..255301161 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -32,7 +32,7 @@ var ( hubURL string scheme = runtime.NewScheme() - // This namespace in HubCluster will store Member cluster-related CRs, such as v1alpha1.MemberCluster + // This namespace will store Member cluster-related CRs, such as v1alpha1.MemberCluster memberNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) // This namespace in HubCluster will store v1alpha1.Work to simulate Work-related features in Hub Cluster. From 5e0c34843d87d971ba814d3763f45db802b68796 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 31 Aug 2022 17:21:37 -0700 Subject: [PATCH 06/36] lint fixes --- test/e2e/framework/cluster.go | 14 +++++++------- test/e2e/work_load_test.go | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index a431a3f5b..d3566c598 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -7,7 +7,7 @@ package framework import ( "os" - . 
"github.com/onsi/gomega" // nolint:golint + "github.com/onsi/gomega" apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -47,23 +47,23 @@ func GetClusterClient(cluster *Cluster) { restConfig, err := clusterConfig.ClientConfig() if err != nil { - Expect(err).ShouldNot(HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up rest config") } cluster.KubeClient, err = client.New(restConfig, client.Options{Scheme: cluster.Scheme}) - Expect(err).ShouldNot(HaveOccurred()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Kube Client") cluster.KubeClientSet, err = kubernetes.NewForConfig(restConfig) - Expect(err).Should(Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up KubeClient Set") cluster.APIExtensionClient, err = apiextension.NewForConfig(restConfig) - Expect(err).Should(Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up API Extension Client.") cluster.DynamicClient, err = dynamic.NewForConfig(restConfig) - Expect(err).Should(Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Dynamic Client") cluster.RestMapper, err = apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery) - Expect(err).Should(Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Rest Mapper") } func GetClientConfig(cluster *Cluster) clientcmd.ClientConfig { diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index 60d4f655e..054e10df8 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -8,7 +8,6 @@ package e2e import ( "context" "fmt" - "go.goms.io/fleet/pkg/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -19,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" "go.goms.io/fleet/apis/v1alpha1" + "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" ) From 9380dccb4ba1fb7560193cef4ed6c5616ad64f73 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 12:39:43 -0700 Subject: [PATCH 07/36] debug + fixed the error --- test/e2e/e2e_test.go | 4 +-- test/e2e/utils/helper.go | 36 ++++++++++++++++----------- test/e2e/utils/work_api_test_utils.go | 5 ---- test/e2e/work_api_e2e_test.go | 19 ++++++++++---- test/e2e/work_load_test.go | 8 +++--- 5 files changed, 41 insertions(+), 31 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 255301161..da5d57cf3 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -12,7 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" testutils "go.goms.io/fleet/test/e2e/utils" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -49,7 +49,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) utilruntime.Must(workv1alpha1.AddToScheme(scheme)) - utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) } func TestE2E(t *testing.T) { diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 1f0ae9302..1168f0cbf 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -8,17 +8,19 @@ import ( "context" "embed" "fmt" - "github.com/onsi/gomega/format" "time" + // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/onsi/gomega/format" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -206,11 +208,11 @@ func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.Clu } // WaitWork waits for Work to be present on the hub cluster. -func WaitWork(cluster framework.Cluster, workName, workNamespace string) { - var work workapi.Work +func WaitWork(ctx context.Context, cluster framework.Cluster, workName, workNamespace string) { klog.Infof("Waiting for Work(%s/%s) to be synced", workName, workNamespace) gomega.Eventually(func() error { - return cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) + var work workapi.Work + return cluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s/%s not synced", workName, workNamespace) } @@ -218,7 +220,7 @@ func WaitWork(cluster framework.Cluster, workName, workNamespace string) { func CreateNamespace(cluster framework.Cluster, ns *corev1.Namespace) { ginkgo.By(fmt.Sprintf("Creating Namespace(%s)", ns.Name), func() { err := cluster.KubeClient.Create(context.TODO(), ns) - gomega.Expect(err).Should(gomega.Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to create namespace %s", ns.Name) }) klog.Infof("Waiting for Namespace(%s) to be synced", ns.Name) gomega.Eventually(func() error { @@ -254,8 +256,9 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) } // CreateWork creates Work object based on manifest given. 
-func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName string, workNamespace string, workList []workapi.Work, manifests []workapi.Manifest) { +func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName string, workNamespace string, manifests []workapi.Manifest) { ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) + work := workapi.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, @@ -268,19 +271,16 @@ func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName stri }, } - workList = append(workList, work) gomega.Expect(hubCluster.KubeClient.Create(ctx, &work)).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) } // DeleteWork deletes all works used in the current test. -func DeleteWork(ctx context.Context, hubCluster framework.Cluster, workList []workapi.Work) error { - if len(workList) > 0 { - for _, work := range workList { - err := hubCluster.KubeClient.Delete(ctx, &work) - if apierrors.IsNotFound(err) { - continue +func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) error { + if len(works) > 0 { + for _, work := range works { + if err := hubCluster.KubeClient.Delete(ctx, &work); err != nil && !apierrors.IsNotFound(err) { + return err } - return err } } @@ -288,12 +288,18 @@ func DeleteWork(ctx context.Context, hubCluster framework.Cluster, workList []wo } // AddManifests adds manifests to be included within a Work Ob -func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) { +func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []workapi.Manifest { for _, obj := range objects { manifests = append(manifests, workapi.Manifest{ RawExtension: runtime.RawExtension{Object: obj}, }) } + return manifests +} + +// GetWorkName creates a work name in a correct format for e2e tests. +func GetWorkName(length int) string { + return "work" + rand.String(length) } // AlreadyExistMatcher matches the error to be already exist diff --git a/test/e2e/utils/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go index 0a73f7750..89b0e616e 100644 --- a/test/e2e/utils/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -14,7 +14,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "go.goms.io/fleet/test/e2e/framework" @@ -98,7 +97,3 @@ func WaitAppliedWorkPresent(workName string, memberCluster *framework.Cluster) { return err }, PollTimeout, PollInterval).Should(gomega.BeNil()) } - -func GetWorkName(length int) string { - return "work" + rand.String(length) -} diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 15a50a158..d346f65a7 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -3,17 +3,19 @@ package e2e import ( "context" "fmt" + "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - testutils "go.goms.io/fleet/test/e2e/utils" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + + testutils "go.goms.io/fleet/test/e2e/utils" ) // TODO: when join/leave logic is connected to work-api, join the Hub and Member for this test. @@ -40,6 +42,9 @@ var _ = Describe("Work API Controller test", func() { BeforeEach(func() { ctx = context.Background() + + //Empties the works since they were garbage collected earlier. + works = []workapi.Work{} }) AfterEach(func() { @@ -48,6 +53,7 @@ var _ = Describe("Work API Controller test", func() { It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { workName := testutils.GetWorkName(5) + By(fmt.Sprintf("Here is the work Name %s", workName)) // Configmap will be included in this work object. manifestConfigMapName := "work-configmap" @@ -65,9 +71,11 @@ var _ = Describe("Work API Controller test", func() { }, } - testutils.AddManifests([]runtime.Object{&manifestConfigMap}, manifests) + manifests = testutils.AddManifests([]runtime.Object{&manifestConfigMap}, manifests) By(fmt.Sprintf("creating work %s/%s of %s", workName, workNamespace.Name, manifestConfigMapName)) - testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, works, manifests) + testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) + + testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) Eventually(func() error { @@ -87,9 +95,10 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) Eventually(func() string { appliedWork := workapi.AppliedWork{} - if err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &appliedWork); err != nil { + if err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork); err != nil { return err.Error() } + if len(appliedWork.Status.AppliedResources) == 0 { return fmt.Sprintf("Applied Work Meta not created for resource %s", manifestConfigMapName) } @@ -101,7 +110,7 @@ var _ = Describe("Work API Controller test", func() { Kind: manifestConfigMap.GroupVersionKind().Kind, Namespace: manifestConfigMap.Namespace, Name: manifestConfigMap.Name, - Resource: "configmap", + Resource: "configmaps", }, } return cmp.Diff(want, appliedWork.Status.AppliedResources[0], cmpOptions...) diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index 054e10df8..a9edf1414 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -7,8 +7,6 @@ package e2e import ( "context" - "fmt" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -18,7 +16,6 @@ import ( "k8s.io/apimachinery/pkg/types" "go.goms.io/fleet/apis/v1alpha1" - "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" ) @@ -28,8 +25,11 @@ var _ = Describe("workload orchestration testing", func() { var imc *v1alpha1.InternalMemberCluster var cr *rbacv1.ClusterRole var crp *v1alpha1.ClusterResourcePlacement + var ctx context.Context BeforeEach(func() { + ctx = context.Background() + By("prepare resources in member cluster") // create testing NS in member cluster sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) @@ -93,7 +93,7 @@ var _ = Describe("workload orchestration testing", func() { testutils.CreateClusterResourcePlacement(*HubCluster, crp) By("check if work gets created for cluster resource placement") - testutils.WaitWork(*HubCluster, workName, memberNamespace.Name) + testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) By("check if cluster resource placement is updated to Scheduled & Applied") testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, 3*testutils.PollTimeout) From 714fa65e7241104409333f164aad09318a7506b0 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 13:21:47 -0700 Subject: [PATCH 08/36] debug + fixed the error --- test/e2e/e2e_test.go | 4 ++-- test/e2e/join_leave_member_test.go | 5 +++-- test/e2e/utils/helper.go | 7 +++++-- test/e2e/utils/work_api_test_utils.go | 1 + test/e2e/work_load_test.go | 1 + 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index da5d57cf3..39d7f2fdc 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -11,8 +11,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - testutils "go.goms.io/fleet/test/e2e/utils" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -22,6 +21,7 @@ import ( "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/test/e2e/framework" + testutils "go.goms.io/fleet/test/e2e/utils" ) var ( diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index 6562d332c..81ed4e6e0 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -6,10 +6,11 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" - "go.goms.io/fleet/apis/v1alpha1" - testutils "go.goms.io/fleet/test/e2e/utils" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "go.goms.io/fleet/apis/v1alpha1" + testutils "go.goms.io/fleet/test/e2e/utils" ) var _ = Describe("Join/leave member cluster testing", func() { diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 1168f0cbf..d7b61141d 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -10,6 +10,8 @@ import ( "fmt" "time" + "k8s.io/apimachinery/pkg/util/rand" + // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -277,8 +279,9 @@ func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName stri // DeleteWork deletes all works used in the current test. 
func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) error { if len(works) > 0 { - for _, work := range works { - if err := hubCluster.KubeClient.Delete(ctx, &work); err != nil && !apierrors.IsNotFound(err) { + // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." + for i := range works { + if err := hubCluster.KubeClient.Delete(ctx, &works[i]); err != nil && !apierrors.IsNotFound(err) { return err } } diff --git a/test/e2e/utils/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go index 89b0e616e..afc604864 100644 --- a/test/e2e/utils/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -8,6 +8,7 @@ package utils import ( "context" "fmt" + "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index a9edf1414..d53d18028 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -7,6 +7,7 @@ package e2e import ( "context" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" From e8032d28f0038aa2294d4403697fd6a1e2c2cefd Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 13:27:44 -0700 Subject: [PATCH 09/36] lint fix --- test/e2e/work_api_e2e_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index d346f65a7..ec9f2545b 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -118,12 +118,11 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) Eventually(func() string { - if cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace). - Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}); err != nil { + cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}) + if err != nil { return err.Error() - } else { - return cmp.Diff(cm.Data, manifestConfigMap.Data) } + return cmp.Diff(cm.Data, manifestConfigMap.Data) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "ConfigMap %s was not created in the cluster %s", manifestConfigMapName, MemberCluster.ClusterName) }) From 361a143784da7200068da4f0065e7424b5b4bc4f Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 14:47:36 -0700 Subject: [PATCH 10/36] debugging --- test/e2e/e2e_test.go | 1 + test/e2e/utils/work_api_test_utils.go | 2 -- test/e2e/work_api_test.go | 5 ++--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 39d7f2fdc..a183a5479 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -41,6 +41,7 @@ var ( // This namespace in MemberCluster will store resources created from the Work-api. workResourceNamespace = testutils.NewNamespace("resource-namespace") + // Used to decode an unstructured object. 
genericCodecs = serializer.NewCodecFactory(scheme) genericCodec = genericCodecs.UniversalDeserializer() ) diff --git a/test/e2e/utils/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go index afc604864..e65e56d89 100644 --- a/test/e2e/utils/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -71,8 +71,6 @@ func RetrieveWork(workNamespace string, workName string, hubCluster *framework.C workRetrieved := workapi.Work{} err := hubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Namespace: workNamespace, Name: workName}, &workRetrieved) if err != nil { - println("err still exists") - println(err.Error()) return nil, err } return &workRetrieved, nil diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index 06f924a63..362b95d41 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -252,8 +252,8 @@ var _ = Describe("work-api testing", func() { return err } - work.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) - work, err = utils.UpdateWork(createdWork, HubCluster) + work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) + work, err = utils.UpdateWork(work, HubCluster) return err }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) @@ -380,7 +380,6 @@ var _ = Describe("work-api testing", func() { for _, mD := range replacedManifestDetails { createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, mD.Manifest) } - createdWork, err = utils.UpdateWork(createdWork, HubCluster) return err From 88607f93cda1ccf187463403f2b7464afa9f7918 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 17:47:41 -0700 Subject: [PATCH 11/36] more formatting --- test/e2e/utils/helper.go | 11 +++-------- test/e2e/work_api_e2e_test.go | 11 +++++++---- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index d7b61141d..2e20c7520 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -10,8 +10,6 @@ import ( "fmt" "time" - "k8s.io/apimachinery/pkg/util/rand" - // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -27,6 +25,7 @@ import ( workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "go.goms.io/fleet/apis/v1alpha1" + "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/test/e2e/framework" ) @@ -277,17 +276,13 @@ func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName stri } // DeleteWork deletes all works used in the current test. -func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) error { +func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) { if len(works) > 0 { // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." 
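The lint note here refers to gosec's "Implicit memory aliasing in for loop" check (G601). A self-contained sketch, using an illustrative work struct rather than the API type, of why ranging by index matters once addresses of slice elements are taken:

package main

import "fmt"

type work struct{ name string }

func main() {
	works := []work{{"a"}, {"b"}, {"c"}}

	// The form gosec flags: &w takes the address of the range variable, which
	// before Go 1.22 is a single variable reused on every iteration.
	var aliased []*work
	for _, w := range works {
		aliased = append(aliased, &w)
	}

	// The form DeleteWork switches to: index into the slice itself, so every
	// stored pointer refers to a distinct element and the linter stays quiet.
	var indexed []*work
	for i := range works {
		indexed = append(indexed, &works[i])
	}

	// On Go versions before 1.22 the first line typically prints "c c c";
	// the second always prints "a b c".
	fmt.Println(aliased[0].name, aliased[1].name, aliased[2].name)
	fmt.Println(indexed[0].name, indexed[1].name, indexed[2].name)
}
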
for i := range works { - if err := hubCluster.KubeClient.Delete(ctx, &works[i]); err != nil && !apierrors.IsNotFound(err) { - return err - } + gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work failed.") } } - - return nil } // AddManifests adds manifests to be included within a Work Ob diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index ec9f2545b..1f9c12582 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -48,7 +48,7 @@ var _ = Describe("Work API Controller test", func() { }) AfterEach(func() { - Expect(testutils.DeleteWork(ctx, *HubCluster, works)).Should(Succeed(), "Deletion of work failed.") + testutils.DeleteWork(ctx, *HubCluster, works) }) It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { @@ -79,13 +79,15 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) Eventually(func() error { - return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) + return MemberCluster.KubeClient.Get(ctx, + types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) Eventually(func() bool { work := workapi.Work{} - if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { + if err := HubCluster.KubeClient.Get(ctx, + types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { return false } @@ -95,7 +97,8 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) Eventually(func() string { appliedWork := workapi.AppliedWork{} - if err := MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork); err != nil { + if err := MemberCluster.KubeClient.Get(ctx, + types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork); err != nil { return err.Error() } From 1978699380669c3520001b5c432ebdeff53d1937 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 1 Sep 2022 18:06:47 -0700 Subject: [PATCH 12/36] more formatting --- test/e2e/utils/helper.go | 4 ++-- test/e2e/work_api_e2e_test.go | 2 +- test/e2e/work_api_test.go | 18 +++++++++--------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 2e20c7520..6e3ac879a 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -295,8 +295,8 @@ func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []work return manifests } -// GetWorkName creates a work name in a correct format for e2e tests. -func GetWorkName(length int) string { +// RandomWorkName creates a work name in a correct format for e2e tests. 
+func RandomWorkName(length int) string { return "work" + rand.String(length) } diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 1f9c12582..d99734dbd 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -52,7 +52,7 @@ var _ = Describe("Work API Controller test", func() { }) It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { - workName := testutils.GetWorkName(5) + workName := testutils.RandomWorkName(5) By(fmt.Sprintf("Here is the work Name %s", workName)) // Configmap will be included in this work object. diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index 362b95d41..b64378acb 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -44,7 +44,7 @@ var _ = Describe("work-api testing", func() { }) workObj := utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, mDetails, ) @@ -113,13 +113,13 @@ var _ = Describe("work-api testing", func() { }) workOne = utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, manifestDetailsOne, ) workTwo = utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, manifestDetailsTwo) @@ -224,7 +224,7 @@ var _ = Describe("work-api testing", func() { }) workObj := utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, initialManifestDetails, ) @@ -286,11 +286,11 @@ var _ = Describe("work-api testing", func() { manifestDetails = generateManifestDetails([]string{ "manifests/test-configmap2.yaml", }) - newDataKey = utils.GetWorkName(5) - newDataValue = utils.GetWorkName(5) + newDataKey = utils.RandomWorkName(5) + newDataValue = utils.RandomWorkName(5) workObj := utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, manifestDetails, ) @@ -348,7 +348,7 @@ var _ = Describe("work-api testing", func() { }) workObj := utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, originalManifestDetails, ) @@ -427,7 +427,7 @@ var _ = Describe("work-api testing", func() { }) workObj := utils.CreateWorkObj( - utils.GetWorkName(5), + utils.RandomWorkName(5), workNamespace.Name, manifestDetails, ) From 85ec2b52c19519f2b252ed90b4ccb990b97a6907 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Fri, 2 Sep 2022 10:06:34 -0700 Subject: [PATCH 13/36] fixes based on comments --- test/e2e/framework/cluster.go | 6 +++--- test/e2e/work_api_e2e_test.go | 18 +++++++++++++----- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index d3566c598..bf0bb67cc 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -8,7 +8,7 @@ import ( "os" "github.com/onsi/gomega" - apiextension "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" @@ -27,7 +27,7 @@ type Cluster struct { Scheme *runtime.Scheme KubeClient client.Client KubeClientSet kubernetes.Interface - APIExtensionClient *apiextension.Clientset + APIExtensionClient *clientset.Clientset DynamicClient dynamic.Interface ClusterName string HubURL string @@ -56,7 +56,7 @@ func GetClusterClient(cluster *Cluster) { cluster.KubeClientSet, err = kubernetes.NewForConfig(restConfig) 
gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up KubeClient Set") - cluster.APIExtensionClient, err = apiextension.NewForConfig(restConfig) + cluster.APIExtensionClient, err = clientset.NewForConfig(restConfig) gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up API Extension Client.") cluster.DynamicClient, err = dynamic.NewForConfig(restConfig) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index d99734dbd..322739688 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -9,7 +9,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -37,6 +36,7 @@ var _ = Describe("Work API Controller test", func() { // Comparison Options cmpOptions = []cmp.Option{ cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), + cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration"), } ) @@ -84,15 +84,23 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) - Eventually(func() bool { + Eventually(func() string { work := workapi.Work{} if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { - return false + return err.Error() + } + + want := []metav1.Condition{ + { + Type: conditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "appliedManifestComplete", + }, } - return meta.IsStatusConditionTrue(work.Status.Conditions, conditionTypeApplied) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeTrue()) + return cmp.Diff(want, work.Status.Conditions) + }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got)") By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) Eventually(func() string { From f29b8e564a27906711b9c3282509d0fff9937440 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Fri, 2 Sep 2022 10:38:45 -0700 Subject: [PATCH 14/36] fixes based on comments --- test/e2e/work_api_e2e_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 322739688..7579f7c1a 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -93,13 +93,14 @@ var _ = Describe("Work API Controller test", func() { want := []metav1.Condition{ { - Type: conditionTypeApplied, - Status: metav1.ConditionTrue, - Reason: "appliedManifestComplete", + Type: conditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "appliedWorkComplete", + Message: "Apply work complete", }, } - return cmp.Diff(want, work.Status.Conditions) + return cmp.Diff(want, work.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got)") By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) From 952ea65b13c896776328763d7c2948c22a18824b Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Fri, 2 Sep 2022 11:43:01 -0700 Subject: [PATCH 15/36] fixes based on comments / Before adding more. 
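The condition checks in these patches replace boolean helpers such as meta.IsStatusConditionTrue with a cmp.Diff against a small want slice, ignoring the fields that churn between reconciles. A self-contained sketch of that comparison; the condition values are illustrative:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Ignore the volatile fields so the check only pins down Type, Status and Reason.
	opts := []cmp.Option{
		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message"),
	}

	want := []metav1.Condition{
		{Type: "Applied", Status: metav1.ConditionTrue, Reason: "appliedWorkComplete"},
	}
	got := []metav1.Condition{
		{
			Type:               "Applied",
			Status:             metav1.ConditionTrue,
			Reason:             "appliedWorkComplete",
			Message:            "Apply work complete",
			ObservedGeneration: 2,
		},
	}

	// An empty diff means the condition matches; anything else reads as (-want, +got).
	fmt.Println(cmp.Diff(want, got, opts...))
}
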
--- test/e2e/e2e_test.go | 6 ++-- test/e2e/framework/cluster.go | 2 +- test/e2e/utils/helper.go | 6 ++-- test/e2e/work_api_e2e_test.go | 56 +++++++++++++---------------------- 4 files changed, 28 insertions(+), 42 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index a183a5479..e26c4e096 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -11,7 +11,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -50,7 +50,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) utilruntime.Must(workv1alpha1.AddToScheme(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) } func TestE2E(t *testing.T) { @@ -63,7 +63,7 @@ var _ = BeforeSuite(func() { Expect(kubeconfig).ShouldNot(BeEmpty(), "Failure to retrieve kubeconfig") hubURL = os.Getenv("HUB_SERVER_URL") - Expect(hubURL).ShouldNot(BeEmpty(), "Failure to retrieve Hub URL.") + Expect(hubURL).ShouldNot(BeEmpty(), "Failure to retrieve Hub URL") // hub setup HubCluster.HubURL = hubURL diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index bf0bb67cc..4b8dca286 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -57,7 +57,7 @@ func GetClusterClient(cluster *Cluster) { gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up KubeClient Set") cluster.APIExtensionClient, err = clientset.NewForConfig(restConfig) - gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up API Extension Client.") + gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up API Extension Client") cluster.DynamicClient, err = dynamic.NewForConfig(restConfig) gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Dynamic Client") diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 6e3ac879a..6c956bf7c 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -257,7 +257,7 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) } // CreateWork creates Work object based on manifest given. -func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName string, workNamespace string, manifests []workapi.Manifest) { +func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, workNamespace string, manifests []workapi.Manifest) { ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) work := workapi.Work{ @@ -280,12 +280,12 @@ func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []worka if len(works) > 0 { // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." for i := range works { - gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work failed.") + gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed.", works[i].Name) } } } -// AddManifests adds manifests to be included within a Work Ob +// AddManifests adds manifests to be included within a Work. 
func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []workapi.Manifest { for _, obj := range objects { manifests = append(manifests, workapi.Manifest{ diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 7579f7c1a..7bceb681f 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -30,9 +30,6 @@ var _ = Describe("Work API Controller test", func() { // Includes all works applied to the hub cluster. Used for garbage collection. works []workapi.Work - // Includes all manifests to be within a Work object. - manifests []workapi.Manifest - // Comparison Options cmpOptions = []cmp.Option{ cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), @@ -71,18 +68,12 @@ var _ = Describe("Work API Controller test", func() { }, } - manifests = testutils.AddManifests([]runtime.Object{&manifestConfigMap}, manifests) + manifests := testutils.AddManifests([]runtime.Object{&manifestConfigMap}, []workapi.Manifest{}) By(fmt.Sprintf("creating work %s/%s of %s", workName, workNamespace.Name, manifestConfigMapName)) testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) - By(fmt.Sprintf("Waiting for AppliedWork %s to be created", workName)) - Eventually(func() error { - return MemberCluster.KubeClient.Get(ctx, - types.NamespacedName{Name: workName}, &workapi.AppliedWork{}) - }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed(), "Failed to create AppliedWork %s", workName) - By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) Eventually(func() string { work := workapi.Work{} @@ -101,32 +92,27 @@ var _ = Describe("Work API Controller test", func() { } return cmp.Diff(want, work.Status.Conditions, cmpOptions...) 
- }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got)") + }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) - Eventually(func() string { - appliedWork := workapi.AppliedWork{} - if err := MemberCluster.KubeClient.Get(ctx, - types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork); err != nil { - return err.Error() - } + appliedWork := workapi.AppliedWork{} + Expect(MemberCluster.KubeClient.Get(ctx, + types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork)).Should(Succeed()) + + want := workapi.AppliedResourceMeta{ + ResourceIdentifier: workapi.ResourceIdentifier{ + Ordinal: 0, + Group: manifestConfigMap.GroupVersionKind().Group, + Version: manifestConfigMap.GroupVersionKind().Version, + Kind: manifestConfigMap.GroupVersionKind().Kind, + Namespace: manifestConfigMap.Namespace, + Name: manifestConfigMap.Name, + Resource: "configmaps", + }, + } - if len(appliedWork.Status.AppliedResources) == 0 { - return fmt.Sprintf("Applied Work Meta not created for resource %s", manifestConfigMapName) - } - want := workapi.AppliedResourceMeta{ - ResourceIdentifier: workapi.ResourceIdentifier{ - Ordinal: 0, - Group: manifestConfigMap.GroupVersionKind().Group, - Version: manifestConfigMap.GroupVersionKind().Version, - Kind: manifestConfigMap.GroupVersionKind().Kind, - Namespace: manifestConfigMap.Namespace, - Name: manifestConfigMap.Name, - Resource: "configmaps", - }, - } - return cmp.Diff(want, appliedWork.Status.AppliedResources[0], cmpOptions...) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate AppliedResourceMeta mismatch (-want, +got)") + Expect(cmp.Diff(want, appliedWork.Status.AppliedResources[0], cmpOptions...)).Should(BeEmpty(), + "Validate AppliedResourceMeta mismatch (-want, +got):") By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) Eventually(func() string { @@ -134,8 +120,8 @@ var _ = Describe("Work API Controller test", func() { if err != nil { return err.Error() } - return cmp.Diff(cm.Data, manifestConfigMap.Data) + return cmp.Diff(manifestConfigMap.Data, cm.Data) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), - "ConfigMap %s was not created in the cluster %s", manifestConfigMapName, MemberCluster.ClusterName) + "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) }) }) From 2b1ad7bc5b5289922c5ee925f8c584947ef240ad Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 6 Sep 2022 14:34:41 -0700 Subject: [PATCH 16/36] addressed all the comments. 
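Several of the assertions above poll by returning either an error string or a cmp.Diff and requiring the result to eventually be empty, which keeps failure output as a readable (-want, +got) diff. A runnable sketch of that shape, with a hypothetical fetchConfigMapData standing in for the client call made in the real tests:

package e2esketch

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/onsi/gomega"
)

// fetchConfigMapData is a hypothetical stand-in for the client Get call.
func fetchConfigMapData() (map[string]string, error) {
	return map[string]string{"test-key": "test-value"}, nil
}

func TestEventuallyDiffPattern(t *testing.T) {
	g := gomega.NewWithT(t)
	want := map[string]string{"test-key": "test-value"}

	g.Eventually(func() string {
		got, err := fetchConfigMapData()
		if err != nil {
			return err.Error() // keep polling until the Get succeeds
		}
		return cmp.Diff(want, got) // an empty diff ends the poll successfully
	}, 5*time.Second, 100*time.Millisecond).Should(gomega.BeEmpty(), "ConfigMap data mismatch (-want, +got):")
}
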
--- test/e2e/e2e_test.go | 4 + .../{utils => }/manifests/test-configmap.yaml | 0 .../manifests/test-configmap2.ns.yaml | 0 .../manifests/test-configmap2.yaml | 0 test/e2e/{utils => }/manifests/test-crd.yaml | 0 .../manifests/test-deployment.yaml | 0 .../{utils => }/manifests/test-namespace.yaml | 0 .../{utils => }/manifests/test-secret.yaml | 0 .../{utils => }/manifests/test-service.yaml | 0 .../manifests/test-serviceaccount.yaml | 0 test/e2e/utils/helper.go | 70 ++++++++++-- test/e2e/utils/work_api_test_utils.go | 11 -- test/e2e/work_api_e2e_test.go | 108 +++++++++++++++--- test/e2e/work_api_test.go | 5 +- 14 files changed, 159 insertions(+), 39 deletions(-) rename test/e2e/{utils => }/manifests/test-configmap.yaml (100%) rename test/e2e/{utils => }/manifests/test-configmap2.ns.yaml (100%) rename test/e2e/{utils => }/manifests/test-configmap2.yaml (100%) rename test/e2e/{utils => }/manifests/test-crd.yaml (100%) rename test/e2e/{utils => }/manifests/test-deployment.yaml (100%) rename test/e2e/{utils => }/manifests/test-namespace.yaml (100%) rename test/e2e/{utils => }/manifests/test-secret.yaml (100%) rename test/e2e/{utils => }/manifests/test-service.yaml (100%) rename test/e2e/{utils => }/manifests/test-serviceaccount.yaml (100%) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index e26c4e096..06d494bb8 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -5,6 +5,7 @@ Licensed under the MIT license. package e2e import ( + "embed" "fmt" "os" "testing" @@ -44,6 +45,9 @@ var ( // Used to decode an unstructured object. genericCodecs = serializer.NewCodecFactory(scheme) genericCodec = genericCodecs.UniversalDeserializer() + + //go:embed manifests + TestManifestFiles embed.FS ) func init() { diff --git a/test/e2e/utils/manifests/test-configmap.yaml b/test/e2e/manifests/test-configmap.yaml similarity index 100% rename from test/e2e/utils/manifests/test-configmap.yaml rename to test/e2e/manifests/test-configmap.yaml diff --git a/test/e2e/utils/manifests/test-configmap2.ns.yaml b/test/e2e/manifests/test-configmap2.ns.yaml similarity index 100% rename from test/e2e/utils/manifests/test-configmap2.ns.yaml rename to test/e2e/manifests/test-configmap2.ns.yaml diff --git a/test/e2e/utils/manifests/test-configmap2.yaml b/test/e2e/manifests/test-configmap2.yaml similarity index 100% rename from test/e2e/utils/manifests/test-configmap2.yaml rename to test/e2e/manifests/test-configmap2.yaml diff --git a/test/e2e/utils/manifests/test-crd.yaml b/test/e2e/manifests/test-crd.yaml similarity index 100% rename from test/e2e/utils/manifests/test-crd.yaml rename to test/e2e/manifests/test-crd.yaml diff --git a/test/e2e/utils/manifests/test-deployment.yaml b/test/e2e/manifests/test-deployment.yaml similarity index 100% rename from test/e2e/utils/manifests/test-deployment.yaml rename to test/e2e/manifests/test-deployment.yaml diff --git a/test/e2e/utils/manifests/test-namespace.yaml b/test/e2e/manifests/test-namespace.yaml similarity index 100% rename from test/e2e/utils/manifests/test-namespace.yaml rename to test/e2e/manifests/test-namespace.yaml diff --git a/test/e2e/utils/manifests/test-secret.yaml b/test/e2e/manifests/test-secret.yaml similarity index 100% rename from test/e2e/utils/manifests/test-secret.yaml rename to test/e2e/manifests/test-secret.yaml diff --git a/test/e2e/utils/manifests/test-service.yaml b/test/e2e/manifests/test-service.yaml similarity index 100% rename from test/e2e/utils/manifests/test-service.yaml rename to test/e2e/manifests/test-service.yaml diff --git 
a/test/e2e/utils/manifests/test-serviceaccount.yaml b/test/e2e/manifests/test-serviceaccount.yaml similarity index 100% rename from test/e2e/utils/manifests/test-serviceaccount.yaml rename to test/e2e/manifests/test-serviceaccount.yaml diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 6c956bf7c..6e32499bf 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -6,7 +6,7 @@ package utils import ( "context" - "embed" + "crypto/sha256" "fmt" "time" @@ -18,8 +18,10 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -35,8 +37,9 @@ var ( // PollTimeout defines the time after which the poll operation times out. PollTimeout = 90 * time.Second - //go:embed manifests - TestManifestFiles embed.FS + manifestHashAnnotation = "fleet.azure.com/spec-hash" + + lastAppliedConfigAnnotation = "fleet.azure.com/last-applied-configuration" ) // NewMemberCluster return a new member cluster. @@ -210,11 +213,14 @@ func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.Clu // WaitWork waits for Work to be present on the hub cluster. func WaitWork(ctx context.Context, cluster framework.Cluster, workName, workNamespace string) { + name := types.NamespacedName{Name: workName, Namespace: workNamespace} + klog.Infof("Waiting for Work(%s/%s) to be synced", workName, workNamespace) gomega.Eventually(func() error { var work workapi.Work - return cluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace}, &work) - }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s/%s not synced", workName, workNamespace) + + return cluster.KubeClient.Get(ctx, name, &work) + }, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s not synced", name) } // CreateNamespace create namespace and waits for namespace to exist. @@ -226,6 +232,7 @@ func CreateNamespace(cluster framework.Cluster, ns *corev1.Namespace) { klog.Infof("Waiting for Namespace(%s) to be synced", ns.Name) gomega.Eventually(func() error { err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: ns.Name, Namespace: ""}, ns) + return err }, PollTimeout, PollInterval).Should(gomega.Succeed()) } @@ -280,7 +287,7 @@ func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []worka if len(works) > 0 { // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." for i := range works { - gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed.", works[i].Name) + gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed", works[i].Name) } } } @@ -288,8 +295,10 @@ func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []worka // AddManifests adds manifests to be included within a Work. 
func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []workapi.Manifest { for _, obj := range objects { + rawObj, err := json.Marshal(obj) + gomega.Expect(err).Should(gomega.Succeed()) manifests = append(manifests, workapi.Manifest{ - RawExtension: runtime.RawExtension{Object: obj}, + RawExtension: runtime.RawExtension{Object: obj, Raw: rawObj}, }) } return manifests @@ -322,3 +331,50 @@ func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message s func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be already exist") } + +// GenerateSpecHash will generate Hash value used for annotation in the work-api for verification for each manifests given. +func GenerateSpecHash(manifests []workapi.Manifest) []string { + var specHashes []string + for index, manifest := range manifests { + unstructuredObj := &unstructured.Unstructured{} + err := unstructuredObj.UnmarshalJSON(manifest.Raw) + gomega.Expect(err).Should(gomega.Succeed(), + "Invalid manifest with ordinal of %d", index) + + annotation := unstructuredObj.GetAnnotations() + if annotation != nil { + delete(annotation, manifestHashAnnotation) + delete(annotation, lastAppliedConfigAnnotation) + if len(annotation) == 0 { + unstructuredObj.SetAnnotations(nil) + } else { + unstructuredObj.SetAnnotations(annotation) + } + } + + unstructuredObj.SetResourceVersion("") + unstructuredObj.SetGeneration(0) + unstructuredObj.SetUID("") + unstructuredObj.SetSelfLink("") + unstructuredObj.SetDeletionTimestamp(nil) + unstructuredObj.SetManagedFields(nil) + unstructured.RemoveNestedField(unstructuredObj.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(unstructuredObj.Object, "status") + // compute the sha256 hash of the remaining data + + jsonBytes, err := json.Marshal(unstructuredObj) + gomega.Expect(err).Should(gomega.Succeed(), + "Marshaling failed for manifest with ordinal of %d", index) + specHashes = append(specHashes, fmt.Sprintf("%x", sha256.Sum256(jsonBytes))) + } + return specHashes +} + +// GetConfigMap retrieves a configmap based on the name and namespace given. 
+func GetConfigMap(ctx context.Context, cluster framework.Cluster, name, namespace string) (corev1.ConfigMap, error) { + cm, err := cluster.KubeClientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return corev1.ConfigMap{}, err + } + return *cm, err +} diff --git a/test/e2e/utils/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go index e65e56d89..e0bba6be3 100644 --- a/test/e2e/utils/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -7,10 +7,7 @@ package utils import ( "context" - "fmt" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -88,11 +85,3 @@ func UpdateWork(work *workapi.Work, hubCluster *framework.Cluster) (*workapi.Wor } return updatedWork, err } - -func WaitAppliedWorkPresent(workName string, memberCluster *framework.Cluster) { - ginkgo.By(fmt.Sprintf("Waiting for AppliedWork to be created with Name %s on memberCluster %s", workName, memberCluster.ClusterName)) - gomega.Eventually(func() error { - _, err := RetrieveAppliedWork(workName, memberCluster) - return err - }, PollTimeout, PollInterval).Should(gomega.BeNil()) -} diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 7bceb681f..99e745000 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -12,6 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" testutils "go.goms.io/fleet/test/e2e/utils" @@ -22,6 +23,7 @@ var _ = Describe("Work API Controller test", func() { const ( conditionTypeApplied = "Applied" + specHashAnnotation = "fleet.azure.com/spec-hash" ) var ( @@ -33,7 +35,8 @@ var _ = Describe("Work API Controller test", func() { // Comparison Options cmpOptions = []cmp.Option{ cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), - cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration"), + cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message"), + cmpopts.IgnoreFields(metav1.OwnerReference{}, "BlockOwnerDeletion"), } ) @@ -50,6 +53,7 @@ var _ = Describe("Work API Controller test", func() { It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { workName := testutils.RandomWorkName(5) + By(fmt.Sprintf("Here is the work Name %s", workName)) // Configmap will be included in this work object. @@ -75,8 +79,8 @@ var _ = Describe("Work API Controller test", func() { testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) + work := workapi.Work{} Eventually(func() string { - work := workapi.Work{} if err := HubCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { return err.Error() @@ -84,44 +88,114 @@ var _ = Describe("Work API Controller test", func() { want := []metav1.Condition{ { - Type: conditionTypeApplied, - Status: metav1.ConditionTrue, - Reason: "appliedWorkComplete", - Message: "Apply work complete", + Type: conditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "appliedWorkComplete", }, } return cmp.Diff(want, work.Status.Conditions, cmpOptions...) 
}, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") + By(fmt.Sprintf("Work %s should contain every manifest conditions for corresponding manifests with correct condition", workName)) + Expect(len(work.Status.ManifestConditions)).To(Equal(1), + "Invalid manifest conditions for work %s", workName) + + expectedManifestCondition := []workapi.ManifestCondition{ + { + Conditions: []metav1.Condition{ + { + Type: conditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: "appliedManifestComplete", + }, + }, + Identifier: workapi.ResourceIdentifier{ + Ordinal: 0, + Group: manifestConfigMap.GroupVersionKind().Group, + Version: manifestConfigMap.GroupVersionKind().Version, + Kind: manifestConfigMap.GroupVersionKind().Kind, + Namespace: manifestConfigMap.Namespace, + Name: manifestConfigMap.Name, + Resource: "configmaps", + }, + }, + } + //Expecting the reason of the condition seperately, since it could be either Complete or Updated. + Expect(work.Status.ManifestConditions[0].Conditions[0].Reason).Should( + SatisfyAny(Equal("appliedManifestComplete"), Equal("appliedManifestUpdated"))) + // Will leave out the reason for this check, since the manifest condition's reason was checked above. + Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, + append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason"))...)).Should(BeEmpty(), + "Manifest Condition not matching for work %s (-want, +got):", workName) + By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) appliedWork := workapi.AppliedWork{} Expect(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork)).Should(Succeed()) - want := workapi.AppliedResourceMeta{ - ResourceIdentifier: workapi.ResourceIdentifier{ - Ordinal: 0, - Group: manifestConfigMap.GroupVersionKind().Group, - Version: manifestConfigMap.GroupVersionKind().Version, - Kind: manifestConfigMap.GroupVersionKind().Kind, - Namespace: manifestConfigMap.Namespace, - Name: manifestConfigMap.Name, - Resource: "configmaps", + want := workapi.AppliedtWorkStatus{ + AppliedResources: []workapi.AppliedResourceMeta{ + { + ResourceIdentifier: workapi.ResourceIdentifier{ + Ordinal: 0, + Group: manifestConfigMap.GroupVersionKind().Group, + Version: manifestConfigMap.GroupVersionKind().Version, + Kind: manifestConfigMap.GroupVersionKind().Kind, + Namespace: manifestConfigMap.Namespace, + Name: manifestConfigMap.Name, + Resource: "configmaps", + }, + }, }, } - Expect(cmp.Diff(want, appliedWork.Status.AppliedResources[0], cmpOptions...)).Should(BeEmpty(), + Expect(cmp.Diff(want, appliedWork.Status, cmpOptions...)).Should(BeEmpty(), "Validate AppliedResourceMeta mismatch (-want, +got):") By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) Eventually(func() string { - cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMap.Name, metav1.GetOptions{}) + cm, err := testutils.GetConfigMap(ctx, *MemberCluster, manifestConfigMap.Name, manifestConfigMap.Namespace) if err != nil { return err.Error() } return cmp.Diff(manifestConfigMap.Data, cm.Data) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) + + By(fmt.Sprintf("Validating 
that the resource %s is owned by the work %s", manifestConfigMapName, workName)) + configMap, err := testutils.GetConfigMap(ctx, *MemberCluster, manifestConfigMap.Name, manifestConfigMap.Namespace) + Expect(err).Should(Succeed(), "Retrieving resource %s failed", manifestConfigMap.Name) + wantOwner := []metav1.OwnerReference{ + { + APIVersion: workapi.GroupVersion.String(), + Kind: workapi.AppliedWorkKind, + Name: appliedWork.GetName(), + UID: appliedWork.GetUID(), + }, + } + + Expect(cmp.Diff(wantOwner, configMap.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch (-want, +got):") + + By(fmt.Sprintf("Validating that the annotation of resource's spec exists on the resource %s", manifestConfigMapName)) + // Owner Reference is created when manifests are being applied. + ownerRef := []metav1.OwnerReference{ + { + APIVersion: workapi.GroupVersion.String(), + Kind: workapi.AppliedWorkKind, + Name: appliedWork.GetName(), + UID: appliedWork.GetUID(), + BlockOwnerDeletion: pointer.Bool(false), + }, + } + validateConfigMap := manifestConfigMap.DeepCopy() + validateConfigMap.SetOwnerReferences(ownerRef) + + //Generating SpecHash for work object to compare with Annotation in the resource. + newManifests := testutils.AddManifests([]runtime.Object{validateConfigMap}, []workapi.Manifest{}) + specHashes := testutils.GenerateSpecHash(newManifests) + + Expect(cmp.Diff(specHashes[0], configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), + "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) }) }) diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index b64378acb..9fc76d4d2 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -133,9 +133,6 @@ var _ = Describe("work-api testing", func() { err = utils.CreateWorkOld(workTwo, HubCluster) Expect(err).ToNot(HaveOccurred()) - utils.WaitAppliedWorkPresent(workOne.Name, MemberCluster) - utils.WaitAppliedWorkPresent(workTwo.Name, MemberCluster) - By("Checking the Applied Work status of each to see both are applied.") Eventually(func() bool { appliedWorkOne, err := utils.RetrieveAppliedWork(workOne.Name, MemberCluster) @@ -468,7 +465,7 @@ func generateManifestDetails(manifestFiles []string) []utils.ManifestDetails { detail := utils.ManifestDetails{} // Read files, create manifest - fileRaw, err := utils.TestManifestFiles.ReadFile(file) + fileRaw, err := TestManifestFiles.ReadFile(file) Expect(err).ToNot(HaveOccurred()) obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) From 68f064195f8d3b4f149f6c8df0a13822609183bb Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 6 Sep 2022 19:17:27 -0700 Subject: [PATCH 17/36] lint checks --- test/e2e/utils/helper.go | 2 +- test/e2e/work_api_e2e_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 6e32499bf..3509b2b24 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -334,7 +334,7 @@ func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (me // GenerateSpecHash will generate Hash value used for annotation in the work-api for verification for each manifests given. 
func GenerateSpecHash(manifests []workapi.Manifest) []string { - var specHashes []string + specHashes := make([]string, len(manifests)) for index, manifest := range manifests { unstructuredObj := &unstructured.Unstructured{} err := unstructuredObj.UnmarshalJSON(manifest.Raw) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 99e745000..5cc4a0233 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -121,7 +121,7 @@ var _ = Describe("Work API Controller test", func() { }, }, } - //Expecting the reason of the condition seperately, since it could be either Complete or Updated. + //Expecting the reason of the condition separately, since it could be either Complete or Updated. Expect(work.Status.ManifestConditions[0].Conditions[0].Reason).Should( SatisfyAny(Equal("appliedManifestComplete"), Equal("appliedManifestUpdated"))) // Will leave out the reason for this check, since the manifest condition's reason was checked above. @@ -195,7 +195,7 @@ var _ = Describe("Work API Controller test", func() { newManifests := testutils.AddManifests([]runtime.Object{validateConfigMap}, []workapi.Manifest{}) specHashes := testutils.GenerateSpecHash(newManifests) - Expect(cmp.Diff(specHashes[0], configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), + Expect(cmp.Diff(specHashes[1], configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) }) }) From adab9395f8fa6a769fa665e46373ff15a7a2a4d6 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 7 Sep 2022 11:48:03 -0700 Subject: [PATCH 18/36] fixes from comments --- test/e2e/utils/helper.go | 2 +- test/e2e/utils/work_api_test_utils.go | 2 ++ test/e2e/work_api_e2e_test.go | 52 +++++++++------------------ 3 files changed, 19 insertions(+), 37 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 3509b2b24..0f6f1a292 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -296,7 +296,7 @@ func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []worka func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []workapi.Manifest { for _, obj := range objects { rawObj, err := json.Marshal(obj) - gomega.Expect(err).Should(gomega.Succeed()) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", obj) manifests = append(manifests, workapi.Manifest{ RawExtension: runtime.RawExtension{Object: obj, Raw: rawObj}, }) diff --git a/test/e2e/utils/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go index e0bba6be3..e69932faf 100644 --- a/test/e2e/utils/work_api_test_utils.go +++ b/test/e2e/utils/work_api_test_utils.go @@ -17,6 +17,8 @@ import ( "go.goms.io/fleet/test/e2e/framework" ) +// Deprecated: The functions in this file will be removed or moved to helper.go. 
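With work_api_test_utils.go being phased out, manifest construction goes through helper.go's AddManifests, which fills in both RawExtension.Raw and RawExtension.Object: the JSON the work controller applies plus the typed object the test keeps asserting against. A minimal sketch of building one such manifest by hand; the ConfigMap values are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/json"
	workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
)

func main() {
	cm := corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "work-configmap", Namespace: "resource-namespace"},
		Data:       map[string]string{"test-key": "test-value"},
	}

	// Raw carries the JSON the work controller applies on the member cluster;
	// Object keeps the typed form around for later assertions.
	raw, err := json.Marshal(&cm)
	if err != nil {
		panic(fmt.Errorf("failed to marshal object %+v: %w", cm, err))
	}
	manifest := workapi.Manifest{
		RawExtension: runtime.RawExtension{Object: &cm, Raw: raw},
	}
	fmt.Printf("manifest for %s/%s holds %d bytes of JSON\n", cm.Namespace, cm.Name, len(manifest.Raw))
}
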
+ type ManifestDetails struct { Manifest workapi.Manifest GVK *schema.GroupVersionKind diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 5cc4a0233..c40cd0952 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -12,7 +12,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" testutils "go.goms.io/fleet/test/e2e/utils" @@ -35,7 +34,7 @@ var _ = Describe("Work API Controller test", func() { // Comparison Options cmpOptions = []cmp.Option{ cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), - cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "ObservedGeneration", "Message"), + cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime", "ObservedGeneration"), cmpopts.IgnoreFields(metav1.OwnerReference{}, "BlockOwnerDeletion"), } ) @@ -78,11 +77,14 @@ var _ = Describe("Work API Controller test", func() { testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) + // Creating types.NamespacedName to use in retrieving objects. + namespaceType := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} + By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) work := workapi.Work{} Eventually(func() string { if err := HubCluster.KubeClient.Get(ctx, - types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &work); err != nil { + namespaceType, &work); err != nil { return err.Error() } @@ -97,17 +99,14 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, work.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") - By(fmt.Sprintf("Work %s should contain every manifest conditions for corresponding manifests with correct condition", workName)) - Expect(len(work.Status.ManifestConditions)).To(Equal(1), - "Invalid manifest conditions for work %s", workName) - expectedManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ { Type: conditionTypeApplied, Status: metav1.ConditionTrue, - Reason: "appliedManifestComplete", + // It is possible for the reason to be appliedManifestUpdated + //Reason: "appliedManifestComplete", }, }, Identifier: workapi.ResourceIdentifier{ @@ -121,18 +120,16 @@ var _ = Describe("Work API Controller test", func() { }, }, } - //Expecting the reason of the condition separately, since it could be either Complete or Updated. - Expect(work.Status.ManifestConditions[0].Conditions[0].Reason).Should( - SatisfyAny(Equal("appliedManifestComplete"), Equal("appliedManifestUpdated"))) - // Will leave out the reason for this check, since the manifest condition's reason was checked above. - Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, - append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason"))...)).Should(BeEmpty(), + + //Excluding Reason for check, since there could be two possible reasons. 
+ options := append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason")) + Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, options...)).Should(BeEmpty(), "Manifest Condition not matching for work %s (-want, +got):", workName) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) appliedWork := workapi.AppliedWork{} Expect(MemberCluster.KubeClient.Get(ctx, - types.NamespacedName{Name: workName, Namespace: workNamespace.Name}, &appliedWork)).Should(Succeed()) + namespaceType, &appliedWork)).Should(Succeed()) want := workapi.AppliedtWorkStatus{ AppliedResources: []workapi.AppliedResourceMeta{ @@ -155,7 +152,7 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) Eventually(func() string { - cm, err := testutils.GetConfigMap(ctx, *MemberCluster, manifestConfigMap.Name, manifestConfigMap.Namespace) + cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) if err != nil { return err.Error() } @@ -164,7 +161,7 @@ var _ = Describe("Work API Controller test", func() { "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, workName)) - configMap, err := testutils.GetConfigMap(ctx, *MemberCluster, manifestConfigMap.Name, manifestConfigMap.Namespace) + configMap, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) Expect(err).Should(Succeed(), "Retrieving resource %s failed", manifestConfigMap.Name) wantOwner := []metav1.OwnerReference{ { @@ -178,24 +175,7 @@ var _ = Describe("Work API Controller test", func() { Expect(cmp.Diff(wantOwner, configMap.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch (-want, +got):") By(fmt.Sprintf("Validating that the annotation of resource's spec exists on the resource %s", manifestConfigMapName)) - // Owner Reference is created when manifests are being applied. - ownerRef := []metav1.OwnerReference{ - { - APIVersion: workapi.GroupVersion.String(), - Kind: workapi.AppliedWorkKind, - Name: appliedWork.GetName(), - UID: appliedWork.GetUID(), - BlockOwnerDeletion: pointer.Bool(false), - }, - } - validateConfigMap := manifestConfigMap.DeepCopy() - validateConfigMap.SetOwnerReferences(ownerRef) - - //Generating SpecHash for work object to compare with Annotation in the resource. 
- newManifests := testutils.AddManifests([]runtime.Object{validateConfigMap}, []workapi.Manifest{}) - specHashes := testutils.GenerateSpecHash(newManifests) - - Expect(cmp.Diff(specHashes[1], configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), - "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) + Expect(configMap.GetAnnotations()[specHashAnnotation]).ToNot(BeNil(), + "There is no spec annotation on the resource %s", configMap.Name) }) }) From 1f9e6db45d88e202e9ad1165a44c4c29706bcdad Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 7 Sep 2022 11:51:09 -0700 Subject: [PATCH 19/36] Added comment for expectation --- test/e2e/work_api_e2e_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index c40cd0952..76e8f5853 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -129,7 +129,8 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) appliedWork := workapi.AppliedWork{} Expect(MemberCluster.KubeClient.Get(ctx, - namespaceType, &appliedWork)).Should(Succeed()) + namespaceType, &appliedWork)).Should(Succeed(), + "Retrieving AppliedWork %s failed", workName) want := workapi.AppliedtWorkStatus{ AppliedResources: []workapi.AppliedResourceMeta{ From e66112ba17d91f4c689d6be293bd21e4ca6120af Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 7 Sep 2022 13:12:44 -0700 Subject: [PATCH 20/36] using .string() for namesapceType for easier readability, and removed duplicate logs. --- test/e2e/utils/helper.go | 6 ------ test/e2e/work_api_e2e_test.go | 15 ++++++++------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 0f6f1a292..9a11d0375 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -36,10 +36,6 @@ var ( PollInterval = 5 * time.Second // PollTimeout defines the time after which the poll operation times out. PollTimeout = 90 * time.Second - - manifestHashAnnotation = "fleet.azure.com/spec-hash" - - lastAppliedConfigAnnotation = "fleet.azure.com/last-applied-configuration" ) // NewMemberCluster return a new member cluster. @@ -265,8 +261,6 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) // CreateWork creates Work object based on manifest given. func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, workNamespace string, manifests []workapi.Manifest) { - ginkgo.By(fmt.Sprintf("Creating Work with Name %s, %s", workName, workNamespace)) - work := workapi.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 76e8f5853..411723690 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -71,16 +71,16 @@ var _ = Describe("Work API Controller test", func() { }, } + // Creating types.NamespacedName to use in retrieving objects. 
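The namespaceType introduced below is mostly about log readability: NamespacedName's String() renders the familiar namespace/name form that the By messages now print. For instance, with illustrative values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	nn := types.NamespacedName{Namespace: "fleet-member-cluster-a", Name: "work1a2b3"}
	// String() joins the parts with "/", so this prints "fleet-member-cluster-a/work1a2b3".
	fmt.Println(nn.String())
}
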
+ namespaceType := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} + manifests := testutils.AddManifests([]runtime.Object{&manifestConfigMap}, []workapi.Manifest{}) - By(fmt.Sprintf("creating work %s/%s of %s", workName, workNamespace.Name, manifestConfigMapName)) + By(fmt.Sprintf("creating work %s of %s", namespaceType.String(), manifestConfigMapName)) testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) - // Creating types.NamespacedName to use in retrieving objects. - namespaceType := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} - - By(fmt.Sprintf("Applied Condition should be set to True for Work %s/%s", workName, workNamespace.Name)) + By(fmt.Sprintf("Applied Condition should be set to True for Work %s", namespaceType.String())) work := workapi.Work{} Eventually(func() string { if err := HubCluster.KubeClient.Get(ctx, @@ -99,6 +99,7 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, work.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") + By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied.", namespaceType.String())) expectedManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ @@ -124,7 +125,7 @@ var _ = Describe("Work API Controller test", func() { //Excluding Reason for check, since there could be two possible reasons. options := append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason")) Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, options...)).Should(BeEmpty(), - "Manifest Condition not matching for work %s (-want, +got):", workName) + "Manifest Condition not matching for work %s (-want, +got):", namespaceType.String()) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) appliedWork := workapi.AppliedWork{} @@ -161,7 +162,7 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) - By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, workName)) + By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, namespaceType.String())) configMap, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) Expect(err).Should(Succeed(), "Retrieving resource %s failed", manifestConfigMap.Name) wantOwner := []metav1.OwnerReference{ From 3a851942025578d21eaac8cba6c8e7f39db130c7 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 7 Sep 2022 14:09:09 -0700 Subject: [PATCH 21/36] removing a period --- test/e2e/work_api_e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 411723690..40080f7e7 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -99,7 +99,7 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, work.Status.Conditions, cmpOptions...) 
}, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied.", namespaceType.String())) + By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied", namespaceType.String())) expectedManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ From 56b2590bc2020bb78361e79c6c4b0a0a8526bcdf Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Wed, 7 Sep 2022 22:42:33 -0700 Subject: [PATCH 22/36] added more validation --- test/e2e/utils/helper.go | 55 +++++------------------------------ test/e2e/work_api_e2e_test.go | 36 ++++++++++++++++------- 2 files changed, 32 insertions(+), 59 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 9a11d0375..cbcb13292 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -18,7 +18,6 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -303,6 +302,13 @@ func RandomWorkName(length int) string { return "work" + rand.String(length) } +// GenerateSpecHash generates a Sha256 value from the object. +func GenerateSpecHash(object runtime.Object) string { + rawObj, err := json.Marshal(object) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", rawObj) + return fmt.Sprintf("%x", sha256.Sum256(rawObj)) +} + // AlreadyExistMatcher matches the error to be already exist type AlreadyExistMatcher struct { } @@ -325,50 +331,3 @@ func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message s func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { return format.Message(actual, "not to be already exist") } - -// GenerateSpecHash will generate Hash value used for annotation in the work-api for verification for each manifests given. -func GenerateSpecHash(manifests []workapi.Manifest) []string { - specHashes := make([]string, len(manifests)) - for index, manifest := range manifests { - unstructuredObj := &unstructured.Unstructured{} - err := unstructuredObj.UnmarshalJSON(manifest.Raw) - gomega.Expect(err).Should(gomega.Succeed(), - "Invalid manifest with ordinal of %d", index) - - annotation := unstructuredObj.GetAnnotations() - if annotation != nil { - delete(annotation, manifestHashAnnotation) - delete(annotation, lastAppliedConfigAnnotation) - if len(annotation) == 0 { - unstructuredObj.SetAnnotations(nil) - } else { - unstructuredObj.SetAnnotations(annotation) - } - } - - unstructuredObj.SetResourceVersion("") - unstructuredObj.SetGeneration(0) - unstructuredObj.SetUID("") - unstructuredObj.SetSelfLink("") - unstructuredObj.SetDeletionTimestamp(nil) - unstructuredObj.SetManagedFields(nil) - unstructured.RemoveNestedField(unstructuredObj.Object, "metadata", "creationTimestamp") - unstructured.RemoveNestedField(unstructuredObj.Object, "status") - // compute the sha256 hash of the remaining data - - jsonBytes, err := json.Marshal(unstructuredObj) - gomega.Expect(err).Should(gomega.Succeed(), - "Marshaling failed for manifest with ordinal of %d", index) - specHashes = append(specHashes, fmt.Sprintf("%x", sha256.Sum256(jsonBytes))) - } - return specHashes -} - -// GetConfigMap retrieves a configmap based on the name and namespace given. 
-func GetConfigMap(ctx context.Context, cluster framework.Cluster, name, namespace string) (corev1.ConfigMap, error) { - cm, err := cluster.KubeClientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return corev1.ConfigMap{}, err - } - return *cm, err -} diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 40080f7e7..ed8cd8ea3 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -12,6 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" testutils "go.goms.io/fleet/test/e2e/utils" @@ -75,16 +76,15 @@ var _ = Describe("Work API Controller test", func() { namespaceType := types.NamespacedName{Name: workName, Namespace: workNamespace.Name} manifests := testutils.AddManifests([]runtime.Object{&manifestConfigMap}, []workapi.Manifest{}) - By(fmt.Sprintf("creating work %s of %s", namespaceType.String(), manifestConfigMapName)) + By(fmt.Sprintf("creating work %s of %s", namespaceType, manifestConfigMapName)) testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) - By(fmt.Sprintf("Applied Condition should be set to True for Work %s", namespaceType.String())) + By(fmt.Sprintf("Applied Condition should be set to True for Work %s", namespaceType)) work := workapi.Work{} Eventually(func() string { - if err := HubCluster.KubeClient.Get(ctx, - namespaceType, &work); err != nil { + if err := HubCluster.KubeClient.Get(ctx, namespaceType, &work); err != nil { return err.Error() } @@ -99,7 +99,7 @@ var _ = Describe("Work API Controller test", func() { return cmp.Diff(want, work.Status.Conditions, cmpOptions...) }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):") - By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied", namespaceType.String())) + By(fmt.Sprintf("Manifest Condiitons on Work Objects %s should be applied", namespaceType)) expectedManifestCondition := []workapi.ManifestCondition{ { Conditions: []metav1.Condition{ @@ -125,12 +125,11 @@ var _ = Describe("Work API Controller test", func() { //Excluding Reason for check, since there could be two possible reasons. 
options := append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason")) Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, options...)).Should(BeEmpty(), - "Manifest Condition not matching for work %s (-want, +got):", namespaceType.String()) + "Manifest Condition not matching for work %s (-want, +got):", namespaceType) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) appliedWork := workapi.AppliedWork{} - Expect(MemberCluster.KubeClient.Get(ctx, - namespaceType, &appliedWork)).Should(Succeed(), + Expect(MemberCluster.KubeClient.Get(ctx, namespaceType, &appliedWork)).Should(Succeed(), "Retrieving AppliedWork %s failed", workName) want := workapi.AppliedtWorkStatus{ @@ -162,7 +161,7 @@ var _ = Describe("Work API Controller test", func() { }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) - By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, namespaceType.String())) + By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, namespaceType)) configMap, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) Expect(err).Should(Succeed(), "Retrieving resource %s failed", manifestConfigMap.Name) wantOwner := []metav1.OwnerReference{ @@ -177,7 +176,22 @@ var _ = Describe("Work API Controller test", func() { Expect(cmp.Diff(wantOwner, configMap.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch (-want, +got):") By(fmt.Sprintf("Validating that the annotation of resource's spec exists on the resource %s", manifestConfigMapName)) - Expect(configMap.GetAnnotations()[specHashAnnotation]).ToNot(BeNil(), - "There is no spec annotation on the resource %s", configMap.Name) + + // Owner Reference is created when manifests are being applied. + ownerRef := []metav1.OwnerReference{ + { + APIVersion: workapi.GroupVersion.String(), + Kind: workapi.AppliedWorkKind, + Name: appliedWork.GetName(), + UID: appliedWork.GetUID(), + BlockOwnerDeletion: pointer.Bool(false), + }, + } + validateConfigMap := manifestConfigMap.DeepCopy() + validateConfigMap.SetOwnerReferences(ownerRef) + wantHash := testutils.GenerateSpecHash(validateConfigMap) + + Expect(cmp.Diff(wantHash, configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), + "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) }) }) From d16123bc85b34e6d305c7b78eb1ba3bfcacc7d6f Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 14:11:56 -0700 Subject: [PATCH 23/36] fixed the validation --- test/e2e/utils/helper.go | 10 +++++++--- test/e2e/work_api_e2e_test.go | 7 ++++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index cbcb13292..e2f441638 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -8,6 +8,7 @@ import ( "context" "crypto/sha256" "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "time" // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. @@ -304,9 +305,12 @@ func RandomWorkName(length int) string { // GenerateSpecHash generates a Sha256 value from the object. 
func GenerateSpecHash(object runtime.Object) string { - rawObj, err := json.Marshal(object) - gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", rawObj) - return fmt.Sprintf("%x", sha256.Sum256(rawObj)) + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + unstructured.RemoveNestedField(unstructuredObj, "metadata", "creationTimestamp") + jsonBytes, err := json.Marshal(unstructuredObj) + + gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", jsonBytes) + return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) } // AlreadyExistMatcher matches the error to be already exist diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index ed8cd8ea3..f22598a2c 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -64,8 +64,9 @@ var _ = Describe("Work API Controller test", func() { Kind: "ConfigMap", }, ObjectMeta: metav1.ObjectMeta{ - Name: manifestConfigMapName, - Namespace: workResourceNamespace.Name, + Name: manifestConfigMapName, + Namespace: workResourceNamespace.Name, + CreationTimestamp: metav1.Time{}, }, Data: map[string]string{ "test-key": "test-data", @@ -189,8 +190,8 @@ var _ = Describe("Work API Controller test", func() { } validateConfigMap := manifestConfigMap.DeepCopy() validateConfigMap.SetOwnerReferences(ownerRef) - wantHash := testutils.GenerateSpecHash(validateConfigMap) + wantHash := testutils.GenerateSpecHash(validateConfigMap) Expect(cmp.Diff(wantHash, configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) }) From 69a044144f50393d7c7e54446a2a275cc53d7da9 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 14:20:18 -0700 Subject: [PATCH 24/36] fix comment + import --- test/e2e/utils/helper.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index e2f441638..2081621b3 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -8,7 +8,6 @@ import ( "context" "crypto/sha256" "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "time" // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. @@ -19,6 +18,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -303,7 +303,7 @@ func RandomWorkName(length int) string { return "work" + rand.String(length) } -// GenerateSpecHash generates a Sha256 value from the object. +// GenerateSpecHash formats the object and creates a hash value for comparison. 
func GenerateSpecHash(object runtime.Object) string { unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) unstructured.RemoveNestedField(unstructuredObj, "metadata", "creationTimestamp") From 05216b255df862f7b2309159b64ed72aad7b5a3c Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 15:37:32 -0700 Subject: [PATCH 25/36] add error handling --- test/e2e/utils/helper.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 2081621b3..629c00e24 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -306,6 +306,7 @@ func RandomWorkName(length int) string { // GenerateSpecHash formats the object and creates a hash value for comparison. func GenerateSpecHash(object runtime.Object) string { unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to convert the object %s for formatting", object) unstructured.RemoveNestedField(unstructuredObj, "metadata", "creationTimestamp") jsonBytes, err := json.Marshal(unstructuredObj) From 403d2f8030cee40977e408d152969215becc7161 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 15:42:52 -0700 Subject: [PATCH 26/36] import lint fix --- test/e2e/work_api_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go index 9fc76d4d2..86419bcd0 100644 --- a/test/e2e/work_api_test.go +++ b/test/e2e/work_api_test.go @@ -7,7 +7,6 @@ package e2e import ( "context" - "go.goms.io/fleet/test/e2e/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -23,6 +22,7 @@ import ( workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" + "go.goms.io/fleet/test/e2e/utils" ) const ( From 4bcb9f39a5b98aa6f07eea0f21ba16e28c63dbba Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 19:59:29 -0700 Subject: [PATCH 27/36] removing some things that weren't there --- test/e2e/utils/helper.go | 24 ------------------------ test/e2e/work_api_e2e_test.go | 5 ++--- 2 files changed, 2 insertions(+), 27 deletions(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 629c00e24..86076c051 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -13,7 +13,6 @@ import ( // Lint check prohibits non "_test" ending files to have dot imports for ginkgo / gomega. "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "github.com/onsi/gomega/format" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -313,26 +312,3 @@ func GenerateSpecHash(object runtime.Object) string { gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", jsonBytes) return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) } - -// AlreadyExistMatcher matches the error to be already exist -type AlreadyExistMatcher struct { -} - -// Match matches error. -func (matcher AlreadyExistMatcher) Match(actual interface{}) (success bool, err error) { - if actual == nil { - return false, nil - } - actualError := actual.(error) - return apierrors.IsAlreadyExists(actualError), nil -} - -// FailureMessage builds an error message. -func (matcher AlreadyExistMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be already exist") -} - -// NegatedFailureMessage builds an error message. 
-func (matcher AlreadyExistMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be already exist") -} diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index f22598a2c..4389a0e7b 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -64,9 +64,8 @@ var _ = Describe("Work API Controller test", func() { Kind: "ConfigMap", }, ObjectMeta: metav1.ObjectMeta{ - Name: manifestConfigMapName, - Namespace: workResourceNamespace.Name, - CreationTimestamp: metav1.Time{}, + Name: manifestConfigMapName, + Namespace: workResourceNamespace.Name, }, Data: map[string]string{ "test-key": "test-data", From cd29a19e674236c7ad5df108939eb7b8e29b0454 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 20:25:28 -0700 Subject: [PATCH 28/36] Added comment for GenerateSpecHash --- test/e2e/utils/helper.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 86076c051..9df1a943f 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -303,6 +303,9 @@ func RandomWorkName(length int) string { } // GenerateSpecHash formats the object and creates a hash value for comparison. +// The Object being passed in should not have the following variables. +// Example: ResourceVersion +// Full List of the variables can be found in the [computeManifestHash](https://github.com/Azure/k8s-work-api/blob/ba21e65fff6bee7282cdfe7e4f189d987ef5502b/pkg/controllers/apply_controller.go#L441) func GenerateSpecHash(object runtime.Object) string { unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) gomega.Expect(err).Should(gomega.Succeed(), "Failed to convert the object %s for formatting", object) From ed8d58d97f082a5cdf809593a0c49267c6d1daec Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Thu, 8 Sep 2022 20:31:38 -0700 Subject: [PATCH 29/36] edited comment for generateSpecHash --- test/e2e/utils/helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 9df1a943f..c13073f9c 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -305,7 +305,7 @@ func RandomWorkName(length int) string { // GenerateSpecHash formats the object and creates a hash value for comparison. // The Object being passed in should not have the following variables. // Example: ResourceVersion -// Full List of the variables can be found in the [computeManifestHash](https://github.com/Azure/k8s-work-api/blob/ba21e65fff6bee7282cdfe7e4f189d987ef5502b/pkg/controllers/apply_controller.go#L441) +// Full List of the variables can be found in the [computeManifestHash](https://github.com/Azure/k8s-work-api/blob/ba21e65fff6bee7282cdfe7e4f189d987ef5502b/pkg/controllers/apply_controller.go#L441). func GenerateSpecHash(object runtime.Object) string { unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) gomega.Expect(err).Should(gomega.Succeed(), "Failed to convert the object %s for formatting", object) From 019d8a0dd83e3d383ac6844b6aa4d12958b528b9 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Mon, 12 Sep 2022 12:30:32 -0700 Subject: [PATCH 30/36] fixed based on comments. 
--- test/e2e/e2e_test.go | 5 ---- test/e2e/utils/helper.go | 34 ++++++---------------- test/e2e/work_api_e2e_test.go | 54 ++++++++++++----------------------- 3 files changed, 28 insertions(+), 65 deletions(-) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 06d494bb8..d3c55336a 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -39,9 +39,6 @@ var ( // This namespace in HubCluster will store v1alpha1.Work to simulate Work-related features in Hub Cluster. workNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) - // This namespace in MemberCluster will store resources created from the Work-api. - workResourceNamespace = testutils.NewNamespace("resource-namespace") - // Used to decode an unstructured object. genericCodecs = serializer.NewCodecFactory(scheme) genericCodec = genericCodecs.UniversalDeserializer() @@ -80,12 +77,10 @@ var _ = BeforeSuite(func() { testutils.CreateNamespace(*MemberCluster, memberNamespace) testutils.CreateNamespace(*HubCluster, workNamespace) - testutils.CreateNamespace(*MemberCluster, workResourceNamespace) }) var _ = AfterSuite(func() { testutils.DeleteNamespace(*MemberCluster, memberNamespace) testutils.DeleteNamespace(*HubCluster, workNamespace) - testutils.DeleteNamespace(*MemberCluster, workResourceNamespace) }) diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index c13073f9c..4ef9a2124 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -6,7 +6,6 @@ package utils import ( "context" - "crypto/sha256" "fmt" "time" @@ -17,7 +16,6 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" @@ -32,9 +30,9 @@ import ( var ( // PollInterval defines the interval time for a poll operation. - PollInterval = 5 * time.Second + PollInterval = 250 * time.Millisecond // PollTimeout defines the time after which the poll operation times out. - PollTimeout = 90 * time.Second + PollTimeout = 60 * time.Second ) // NewMemberCluster return a new member cluster. @@ -259,7 +257,7 @@ func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) } // CreateWork creates Work object based on manifest given. -func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, workNamespace string, manifests []workapi.Manifest) { +func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, workNamespace string, manifests []workapi.Manifest) workapi.Work { work := workapi.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, @@ -272,16 +270,16 @@ func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, wor }, } - gomega.Expect(hubCluster.KubeClient.Create(ctx, &work)).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) + err := hubCluster.KubeClient.Create(ctx, &work) + gomega.Expect(err).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace) + return work } // DeleteWork deletes all works used in the current test. func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) { - if len(works) > 0 { - // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." 
- for i := range works { - gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed", works[i].Name) - } + // Using index instead of work object itself due to lint check "Implicit memory aliasing in for loop." + for i := range works { + gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed", works[i].Name) } } @@ -301,17 +299,3 @@ func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []work func RandomWorkName(length int) string { return "work" + rand.String(length) } - -// GenerateSpecHash formats the object and creates a hash value for comparison. -// The Object being passed in should not have the following variables. -// Example: ResourceVersion -// Full List of the variables can be found in the [computeManifestHash](https://github.com/Azure/k8s-work-api/blob/ba21e65fff6bee7282cdfe7e4f189d987ef5502b/pkg/controllers/apply_controller.go#L441). -func GenerateSpecHash(object runtime.Object) string { - unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) - gomega.Expect(err).Should(gomega.Succeed(), "Failed to convert the object %s for formatting", object) - unstructured.RemoveNestedField(unstructuredObj, "metadata", "creationTimestamp") - jsonBytes, err := json.Marshal(unstructuredObj) - - gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", jsonBytes) - return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) -} diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 4389a0e7b..633cecfe2 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -12,9 +12,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" + "go.goms.io/fleet/pkg/utils" testutils "go.goms.io/fleet/test/e2e/utils" ) @@ -37,18 +37,27 @@ var _ = Describe("Work API Controller test", func() { cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"), cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime", "ObservedGeneration"), cmpopts.IgnoreFields(metav1.OwnerReference{}, "BlockOwnerDeletion"), + cmpopts.IgnoreFields(workapi.ResourceIdentifier{}, "Ordinal"), } + + resourceNamespace *corev1.Namespace ) BeforeEach(func() { ctx = context.Background() + // This namespace in MemberCluster will store specified test resources created from the Work-api. + resourceNamespaceName := "resource-namespace" + utils.RandStr() + resourceNamespace = testutils.NewNamespace(resourceNamespaceName) + testutils.CreateNamespace(*MemberCluster, resourceNamespace) + //Empties the works since they were garbage collected earlier. 
works = []workapi.Work{} }) AfterEach(func() { testutils.DeleteWork(ctx, *HubCluster, works) + testutils.DeleteNamespace(*MemberCluster, resourceNamespace) }) It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() { @@ -65,7 +74,7 @@ var _ = Describe("Work API Controller test", func() { }, ObjectMeta: metav1.ObjectMeta{ Name: manifestConfigMapName, - Namespace: workResourceNamespace.Name, + Namespace: resourceNamespace.Name, }, Data: map[string]string{ "test-key": "test-data", @@ -79,10 +88,9 @@ var _ = Describe("Work API Controller test", func() { By(fmt.Sprintf("creating work %s of %s", namespaceType, manifestConfigMapName)) testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests) - testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) - By(fmt.Sprintf("Applied Condition should be set to True for Work %s", namespaceType)) work := workapi.Work{} + Eventually(func() string { if err := HubCluster.KubeClient.Get(ctx, namespaceType, &work); err != nil { return err.Error() @@ -106,12 +114,10 @@ var _ = Describe("Work API Controller test", func() { { Type: conditionTypeApplied, Status: metav1.ConditionTrue, - // It is possible for the reason to be appliedManifestUpdated - //Reason: "appliedManifestComplete", + Reason: "appliedManifestUpdated", }, }, Identifier: workapi.ResourceIdentifier{ - Ordinal: 0, Group: manifestConfigMap.GroupVersionKind().Group, Version: manifestConfigMap.GroupVersionKind().Version, Kind: manifestConfigMap.GroupVersionKind().Kind, @@ -122,9 +128,7 @@ var _ = Describe("Work API Controller test", func() { }, } - //Excluding Reason for check, since there could be two possible reasons. - options := append(cmpOptions, cmpopts.IgnoreFields(metav1.Condition{}, "Reason")) - Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, options...)).Should(BeEmpty(), + Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, cmpOptions...)).Should(BeEmpty(), "Manifest Condition not matching for work %s (-want, +got):", namespaceType) By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName)) @@ -136,7 +140,6 @@ var _ = Describe("Work API Controller test", func() { AppliedResources: []workapi.AppliedResourceMeta{ { ResourceIdentifier: workapi.ResourceIdentifier{ - Ordinal: 0, Group: manifestConfigMap.GroupVersionKind().Group, Version: manifestConfigMap.GroupVersionKind().Version, Kind: manifestConfigMap.GroupVersionKind().Kind, @@ -152,13 +155,9 @@ var _ = Describe("Work API Controller test", func() { "Validate AppliedResourceMeta mismatch (-want, +got):") By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName)) - Eventually(func() string { - cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) - if err != nil { - return err.Error() - } - return cmp.Diff(manifestConfigMap.Data, cm.Data) - }, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), + cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{}) + Expect(err).Should(Succeed()) + Expect(cmp.Diff(manifestConfigMap.Data, cm.Data)).Should(BeEmpty(), "ConfigMap %s was not created in the cluster %s, or configMap data mismatch(-want, +got):", manifestConfigMapName, MemberCluster.ClusterName) By(fmt.Sprintf("Validating 
that the resource %s is owned by the work %s", manifestConfigMapName, namespaceType)) @@ -176,22 +175,7 @@ var _ = Describe("Work API Controller test", func() { Expect(cmp.Diff(wantOwner, configMap.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch (-want, +got):") By(fmt.Sprintf("Validating that the annotation of resource's spec exists on the resource %s", manifestConfigMapName)) - - // Owner Reference is created when manifests are being applied. - ownerRef := []metav1.OwnerReference{ - { - APIVersion: workapi.GroupVersion.String(), - Kind: workapi.AppliedWorkKind, - Name: appliedWork.GetName(), - UID: appliedWork.GetUID(), - BlockOwnerDeletion: pointer.Bool(false), - }, - } - validateConfigMap := manifestConfigMap.DeepCopy() - validateConfigMap.SetOwnerReferences(ownerRef) - - wantHash := testutils.GenerateSpecHash(validateConfigMap) - Expect(cmp.Diff(wantHash, configMap.ObjectMeta.Annotations[specHashAnnotation])).Should(BeEmpty(), - "Validating SpecHash Annotation failed for resource %s in work %s(-want, +got):", configMap.Name, workName) + Expect(configMap.ObjectMeta.Annotations[specHashAnnotation]).ToNot(BeEmpty(), + "SpecHash Annotation does not exist for resource %s", configMap.Name) }) }) From c018ce4aede02dca80620497500cba5608a2392a Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Mon, 12 Sep 2022 14:15:11 -0700 Subject: [PATCH 31/36] ensuring the MemberCluster CR is deleted. --- test/e2e/join_leave_member_test.go | 12 ++++++++---- test/e2e/utils/helper.go | 6 +++++- test/e2e/work_load_test.go | 2 +- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index 81ed4e6e0..6ecd96fb8 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -5,20 +5,23 @@ Licensed under the MIT license. package e2e import ( + "context" . "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "go.goms.io/fleet/apis/v1alpha1" testutils "go.goms.io/fleet/test/e2e/utils" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var _ = Describe("Join/leave member cluster testing", func() { var mc *v1alpha1.MemberCluster var sa *corev1.ServiceAccount var imc *v1alpha1.InternalMemberCluster + var ctx context.Context BeforeEach(func() { + ctx = context.Background() + sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) testutils.CreateServiceAccount(*MemberCluster, sa) @@ -35,8 +38,9 @@ var _ = Describe("Join/leave member cluster testing", func() { }) AfterEach(func() { + testutils.DeleteMemberCluster(ctx, *HubCluster, mc) testutils.DeleteServiceAccount(*MemberCluster, sa) - testutils.DeleteMemberCluster(*HubCluster, mc) + }) It("Join & Leave flow is successful ", func() { diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index 4ef9a2124..dc9f5d37d 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -106,11 +106,15 @@ func UpdateMemberClusterState(cluster framework.Cluster, mc *v1alpha1.MemberClus } // DeleteMemberCluster deletes MemberCluster in the hub cluster. 
-func DeleteMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) { +func DeleteMemberCluster(ctx context.Context, cluster framework.Cluster, mc *v1alpha1.MemberCluster) { ginkgo.By(fmt.Sprintf("Deleting MemberCluster(%s)", mc.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), mc) gomega.Expect(err).Should(gomega.Succeed()) }) + + gomega.Eventually(func() bool { + return apierrors.IsNotFound(cluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc)) + }, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for member cluster %s to be deleted in %s cluster", mc.Name, cluster.ClusterName) } // WaitConditionMemberCluster waits for MemberCluster to present on th hub cluster with a specific condition. diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index d53d18028..2de436cec 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -51,7 +51,7 @@ var _ = Describe("workload orchestration testing", func() { }) AfterEach(func() { - testutils.DeleteMemberCluster(*HubCluster, mc) + testutils.DeleteMemberCluster(ctx, *HubCluster, mc) testutils.DeleteServiceAccount(*MemberCluster, sa) }) From 4a881631b956319af917925e60559f4b273734e3 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Mon, 12 Sep 2022 14:25:14 -0700 Subject: [PATCH 32/36] lint fix --- test/e2e/join_leave_member_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index 6ecd96fb8..5b53e28d7 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -6,6 +6,7 @@ package e2e import ( "context" + . "github.com/onsi/ginkgo/v2" "go.goms.io/fleet/apis/v1alpha1" testutils "go.goms.io/fleet/test/e2e/utils" From b76a09a86ca33a795c618364fb124488b16fc622 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 13 Sep 2022 13:30:51 -0700 Subject: [PATCH 33/36] go update to fix vulnerability --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ad54b2a78..0d0f6131d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.18' + GO_VERSION: '1.18.6' jobs: detect-noop: From 4c7f8ea46d6df98bbd7a8f69a5d87fbb1ce584f9 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 13 Sep 2022 14:04:48 -0700 Subject: [PATCH 34/36] trying changing just the trivy --- .github/workflows/ci.yml | 2 +- .github/workflows/trivy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0d0f6131d..ad54b2a78 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.18.6' + GO_VERSION: '1.18' jobs: detect-noop: diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 2e5373994..8a8455a9d 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -19,7 +19,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.18' + GO_VERSION: '1.18.6' jobs: export-registry: From c23c8de9c289fa3fe9c5f7d83a8fb56786ba7486 Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 13 Sep 2022 14:47:29 -0700 Subject: [PATCH 35/36] undoing trivy +updating specific module --- .github/workflows/trivy.yml | 2 +- go.mod | 2 
+- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 8a8455a9d..2e5373994 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -19,7 +19,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.18.6' + GO_VERSION: '1.18' jobs: export-registry: diff --git a/go.mod b/go.mod index ac1d01e37..337f4bf45 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect From ab706084387a6e07dee2f4ddb59aa6321653527d Mon Sep 17 00:00:00 2001 From: Youn Jae Kim Date: Tue, 13 Sep 2022 14:47:52 -0700 Subject: [PATCH 36/36] updating go.mod --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 337f4bf45..3c4c12854 100644 --- a/go.mod +++ b/go.mod @@ -73,7 +73,7 @@ require ( golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect diff --git a/go.sum b/go.sum index 4ce2a98b7..45f1431a3 100644 --- a/go.sum +++ b/go.sum @@ -619,8 +619,8 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -713,8 +713,8 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
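
A note on the spec-hash validation that PATCH 22 through PATCH 30 iterate on: the test-side helper ultimately amounted to hashing a sanitized copy of the manifest and comparing the result against the spec-hash annotation (fleet.azure.com/spec-hash in these patches) on the applied resource, before PATCH 30 dropped the recomputation in favor of only asserting that the annotation is non-empty. The sketch below mirrors the GenerateSpecHash helper added and later removed in these patches, under the same assumption called out in PATCH 28 (the object passed in must not carry server-populated fields such as ResourceVersion). It is illustrative only: generateSpecHashSketch is not a name used in the repository, and the authoritative list of stripped fields lives in computeManifestHash in k8s-work-api.

package utils

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// generateSpecHashSketch converts the object to its unstructured form, drops
// the empty creationTimestamp that the converter always emits, and returns
// the sha256 of the JSON encoding. The caller is expected to pass an object
// that carries no server-populated fields (ResourceVersion, UID,
// managedFields, status, ...), otherwise the hash will not match the
// controller-side value.
func generateSpecHashSketch(object runtime.Object) (string, error) {
	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
	if err != nil {
		return "", err
	}
	unstructured.RemoveNestedField(unstructuredObj, "metadata", "creationTimestamp")

	jsonBytes, err := json.Marshal(unstructuredObj)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)), nil
}

Checking only that the annotation is present, as the final revision of work_api_e2e_test.go does, keeps the e2e test from having to reproduce the controller's exact hash inputs, which is presumably why the recomputation needed follow-up fixes in PATCH 23 and PATCH 25 before being removed.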