diff --git a/go.mod b/go.mod
index ac1d01e37..3c4c12854 100644
--- a/go.mod
+++ b/go.mod
@@ -71,9 +71,9 @@ require (
 	go.uber.org/multierr v1.6.0 // indirect
 	go.uber.org/zap v1.19.1 // indirect
 	golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
-	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+	golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
-	golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
+	golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
diff --git a/go.sum b/go.sum
index 4ce2a98b7..45f1431a3 100644
--- a/go.sum
+++ b/go.sum
@@ -619,8 +619,8 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -713,8 +713,8 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index ea96d4416..d3c55336a 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -5,11 +5,14 @@ Licensed under the MIT license.
 package e2e
 
 import (
+	"embed"
+	"fmt"
 	"os"
 	"testing"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -17,7 +20,9 @@ import (
 	workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
 
 	"go.goms.io/fleet/apis/v1alpha1"
+	"go.goms.io/fleet/pkg/utils"
 	"go.goms.io/fleet/test/e2e/framework"
+	testutils "go.goms.io/fleet/test/e2e/utils"
 )
 
 var (
@@ -27,14 +32,26 @@ var (
 	MemberCluster = framework.NewCluster(memberClusterName, scheme)
 	hubURL        string
 	scheme        = runtime.NewScheme()
-	genericCodecs = serializer.NewCodecFactory(scheme)
-	genericCodec  = genericCodecs.UniversalDeserializer()
+
+	// This namespace will store member cluster-related CRs, such as v1alpha1.MemberCluster.
+	memberNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName))
+
+	// This namespace in the HubCluster will store v1alpha1.Work objects to simulate Work-related features on the Hub Cluster.
+	workNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName))
+
+	// Used to decode an unstructured object.
+	genericCodecs = serializer.NewCodecFactory(scheme)
+	genericCodec  = genericCodecs.UniversalDeserializer()
+
+	//go:embed manifests
+	TestManifestFiles embed.FS
 )
 
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(v1alpha1.AddToScheme(scheme))
 	utilruntime.Must(workv1alpha1.AddToScheme(scheme))
+	utilruntime.Must(apiextensionsv1.AddToScheme(scheme))
 }
 
 func TestE2E(t *testing.T) {
@@ -44,10 +61,10 @@ func TestE2E(t *testing.T) {
 
 var _ = BeforeSuite(func() {
 	kubeconfig := os.Getenv("KUBECONFIG")
-	Expect(kubeconfig).ShouldNot(BeEmpty())
+	Expect(kubeconfig).ShouldNot(BeEmpty(), "Failed to retrieve kubeconfig")
 	hubURL = os.Getenv("HUB_SERVER_URL")
-	Expect(hubURL).ShouldNot(BeEmpty())
+	Expect(hubURL).ShouldNot(BeEmpty(), "Failed to retrieve Hub URL")
 
 	// hub setup
 	HubCluster.HubURL = hubURL
@@ -57,4 +74,13 @@ var _ = BeforeSuite(func() {
 	MemberCluster.HubURL = hubURL
 	framework.GetClusterClient(MemberCluster)
 
+	testutils.CreateNamespace(*MemberCluster, memberNamespace)
+
+	testutils.CreateNamespace(*HubCluster, workNamespace)
+})
+
+var _ = AfterSuite(func() {
+	testutils.DeleteNamespace(*MemberCluster, memberNamespace)
+
+	testutils.DeleteNamespace(*HubCluster, workNamespace)
 })
diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go
index 5df432772..4b8dca286 100644
--- a/test/e2e/framework/cluster.go
+++ b/test/e2e/framework/cluster.go
@@ -8,6 +8,7 @@ import (
 	"os"
 
 	"github.com/onsi/gomega"
+	"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/dynamic"
@@ -21,14 +22,16 @@ var (
 	kubeconfigPath = os.Getenv("KUBECONFIG")
 )
 
+// Cluster object defines the required clients based on the kubeconfig of the test cluster.
 type Cluster struct {
-	Scheme        *runtime.Scheme
-	KubeClient    client.Client
-	KubeClientSet kubernetes.Interface
-	DynamicClient dynamic.Interface
-	ClusterName   string
-	HubURL        string
-	RestMapper    meta.RESTMapper
+	Scheme             *runtime.Scheme
+	KubeClient         client.Client
+	KubeClientSet      kubernetes.Interface
+	APIExtensionClient *clientset.Clientset
+	DynamicClient      dynamic.Interface
+	ClusterName        string
+	HubURL             string
+	RestMapper         meta.RESTMapper
 }
 
 func NewCluster(name string, scheme *runtime.Scheme) *Cluster {
@@ -44,20 +47,23 @@ func GetClusterClient(cluster *Cluster) {
 	restConfig, err := clusterConfig.ClientConfig()
 	if err != nil {
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up rest config")
 	}
 
 	cluster.KubeClient, err = client.New(restConfig, client.Options{Scheme: cluster.Scheme})
-	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Kube Client")
 
 	cluster.KubeClientSet, err = kubernetes.NewForConfig(restConfig)
-	gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up KubeClientSet")
+
+	cluster.APIExtensionClient, err = clientset.NewForConfig(restConfig)
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up API Extension Client")
 
 	cluster.DynamicClient, err = dynamic.NewForConfig(restConfig)
-	gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Dynamic Client")
 
 	cluster.RestMapper, err = apiutil.NewDynamicRESTMapper(restConfig, apiutil.WithLazyDiscovery)
-	gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to set up Rest Mapper")
 }
 
 func GetClientConfig(cluster *Cluster) clientcmd.ClientConfig {
diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go
index eab4b7b62..5b53e28d7 100644
--- a/test/e2e/join_leave_member_test.go
+++ b/test/e2e/join_leave_member_test.go
@@ -6,32 +6,24 @@ package e2e
 
 import (
 	"context"
-	"fmt"
 
 	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-
 	"go.goms.io/fleet/apis/v1alpha1"
-	"go.goms.io/fleet/pkg/utils"
 	testutils "go.goms.io/fleet/test/e2e/utils"
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 var _ = Describe("Join/leave member cluster testing", func() {
 	var mc *v1alpha1.MemberCluster
 	var sa *corev1.ServiceAccount
-	var memberNS *corev1.Namespace
 	var imc *v1alpha1.InternalMemberCluster
+	var ctx context.Context
 
 	BeforeEach(func() {
-		memberNS = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName))
-		By("prepare resources in member cluster")
-		// create testing NS in member cluster
-		testutils.CreateNamespace(*MemberCluster, memberNS)
-		sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNS.Name)
+		ctx = context.Background()
+
+		sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name)
 		testutils.CreateServiceAccount(*MemberCluster, sa)
 
 		By("deploy member cluster in the hub cluster")
@@ -39,7 +31,7 @@ var _ = Describe("Join/leave member cluster testing", func() {
 		testutils.CreateMemberCluster(*HubCluster, mc)
 
 		By("check if internal member cluster is created in the hub cluster")
-		imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNS.Name)
+		imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name)
 		testutils.WaitInternalMemberCluster(*HubCluster, imc)
 
 		By("check if member cluster is marked as readyToJoin")
@@ -47,16 +39,9 @@ var _ = Describe("Join/leave member cluster testing", func() {
 	})
 
 	AfterEach(func() {
-		testutils.DeleteNamespace(*MemberCluster, memberNS)
-		Eventually(func() bool {
-			err := MemberCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS)
-			return apierrors.IsNotFound(err)
-		}, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true))
-		testutils.DeleteMemberCluster(*HubCluster, mc)
-		Eventually(func() bool {
-			err := HubCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS)
-			return apierrors.IsNotFound(err)
-		}, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true))
+		testutils.DeleteMemberCluster(ctx, *HubCluster, mc)
+		testutils.DeleteServiceAccount(*MemberCluster, sa)
+
 	})
 
 	It("Join & Leave flow is successful", func() {
diff --git a/test/e2e/manifests/test-configmap.ns.yaml b/test/e2e/manifests/test-configmap2.ns.yaml
similarity index 78%
rename from test/e2e/manifests/test-configmap.ns.yaml
rename to test/e2e/manifests/test-configmap2.ns.yaml
index 780a1b358..f627a63b3 100644
--- a/test/e2e/manifests/test-configmap.ns.yaml
+++ b/test/e2e/manifests/test-configmap2.ns.yaml
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: test-configmap
+  name: test-configmap2
   namespace: test-namespace
 data:
   fielda: one
diff --git a/test/e2e/manifests/test-configmap2.yaml b/test/e2e/manifests/test-configmap2.yaml
new file mode 100644
index 000000000..d3dc4884f
--- /dev/null
+++ b/test/e2e/manifests/test-configmap2.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap2
+  namespace: default
+data:
+  fielda: one
+  fieldb: two
+  fieldc: three
diff --git a/test/e2e/manifests/test-secret.yaml b/test/e2e/manifests/test-secret.yaml
index 707b1613d..c6450fef2 100644
--- a/test/e2e/manifests/test-secret.yaml
+++ b/test/e2e/manifests/test-secret.yaml
@@ -4,5 +4,5 @@ metadata:
   name: test-secret
   namespace: default
 data:
-  somekey: Q2xpZW50SWQ6IDUxOTEwNTY4LTM0YzktNGQ0ZS1iODA1LTNmNTY3NWQyMDdiYwpDbGllbnRTZWNyZXQ6IDZSLThRfkJvSDNNYm1+eGJpaDhmNVZibHBkWGxzeGQyRnp+WXhjWjYKVGVuYW50SWQ6IDcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0NwpTdWJzY3JpcHRpb25JZDogMmIwM2JmYjgtZTg4NS00NTY2LWE2MmEtOTA5YTExZDcxNjkyClJlc291cmNlR3JvdXA6IGNhcmF2ZWwtZGVtbw==
+  somekey: dGVzdA==
 type: generic
diff --git a/test/e2e/manifests/test-serviceaccount.yaml b/test/e2e/manifests/test-serviceaccount.yaml
index c2c6600fc..5b5b1f9fa 100644
--- a/test/e2e/manifests/test-serviceaccount.yaml
+++ b/test/e2e/manifests/test-serviceaccount.yaml
@@ -2,4 +2,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: test-serviceaccount
-  namespace: default
\ No newline at end of file
+  namespace: default
diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go
index 2b58a9a96..dc9f5d37d 100644
--- a/test/e2e/utils/helper.go
+++ b/test/e2e/utils/helper.go
@@ -9,23 +9,28 @@ import (
 	"fmt"
 	"time"
 
+	// The lint check prohibits files that do not end in "_test" from using dot imports for ginkgo / gomega.
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/klog/v2"
 	workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
 
 	"go.goms.io/fleet/apis/v1alpha1"
+	"go.goms.io/fleet/pkg/utils"
 	"go.goms.io/fleet/test/e2e/framework"
 )
 
 var (
 	// PollInterval defines the interval time for a poll operation.
-	PollInterval = 5 * time.Second
+	PollInterval = 250 * time.Millisecond
 	// PollTimeout defines the time after which the poll operation times out.
 	PollTimeout = 60 * time.Second
 )
@@ -82,7 +87,7 @@ func NewNamespace(name string) *corev1.Namespace {
 func CreateMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) {
 	ginkgo.By(fmt.Sprintf("Creating MemberCluster(%s)", mc.Name), func() {
 		err := cluster.KubeClient.Create(context.TODO(), mc)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 	klog.Infof("Waiting for MemberCluster(%s) to be synced", mc.Name)
 	gomega.Eventually(func() error {
@@ -94,18 +99,22 @@ func CreateMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster)
 // UpdateMemberClusterState updates MemberCluster in the hub cluster.
 func UpdateMemberClusterState(cluster framework.Cluster, mc *v1alpha1.MemberCluster, state v1alpha1.ClusterState) {
 	err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: mc.Name, Namespace: ""}, mc)
-	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed())
 	mc.Spec.State = state
 	err = cluster.KubeClient.Update(context.TODO(), mc)
-	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+	gomega.Expect(err).Should(gomega.Succeed())
 }
 
 // DeleteMemberCluster deletes MemberCluster in the hub cluster.
-func DeleteMemberCluster(cluster framework.Cluster, mc *v1alpha1.MemberCluster) {
+func DeleteMemberCluster(ctx context.Context, cluster framework.Cluster, mc *v1alpha1.MemberCluster) {
 	ginkgo.By(fmt.Sprintf("Deleting MemberCluster(%s)", mc.Name), func() {
 		err := cluster.KubeClient.Delete(context.TODO(), mc)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
+
+	gomega.Eventually(func() bool {
+		return apierrors.IsNotFound(cluster.KubeClient.Get(ctx, types.NamespacedName{Name: mc.Name}, mc))
+	}, PollTimeout, PollInterval).Should(gomega.BeTrue(), "Failed to wait for member cluster %s to be deleted in %s cluster", mc.Name, cluster.ClusterName)
 }
 
 // WaitConditionMemberCluster waits for MemberCluster to be present on the hub cluster with a specific condition.
@@ -133,8 +142,9 @@ func WaitInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1.Internal
 func WaitConditionInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1.InternalMemberCluster, conditionType v1alpha1.AgentConditionType, status metav1.ConditionStatus, customTimeout time.Duration) {
 	klog.Infof("Waiting for InternalMemberCluster(%s) condition(%s) status(%s) to be synced in the %s cluster", imc.Name, conditionType, status, cluster.ClusterName)
 	gomega.Eventually(func() bool {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc)
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		if err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: imc.Name, Namespace: imc.Namespace}, imc); err != nil {
+			return false
+		}
 		cond := imc.GetConditionWithType(v1alpha1.MemberAgent, string(conditionType))
 		return cond != nil && cond.Status == status
 	}, customTimeout, PollInterval).Should(gomega.Equal(true))
@@ -144,7 +154,7 @@ func WaitConditionInternalMemberCluster(cluster framework.Cluster, imc *v1alpha1
 func CreateClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
 	ginkgo.By(fmt.Sprintf("Creating ClusterRole (%s)", cr.Name), func() {
 		err := cluster.KubeClient.Create(context.TODO(), cr)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 }
 
@@ -161,7 +171,7 @@ func WaitClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
 func DeleteClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
 	ginkgo.By(fmt.Sprintf("Deleting ClusterRole(%s)", cr.Name), func() {
 		err := cluster.KubeClient.Delete(context.TODO(), cr)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 }
 
@@ -169,7 +179,7 @@ func DeleteClusterRole(cluster framework.Cluster, cr *rbacv1.ClusterRole) {
 func CreateClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) {
 	ginkgo.By(fmt.Sprintf("Creating ClusterResourcePlacement(%s)", crp.Name), func() {
 		err := cluster.KubeClient.Create(context.TODO(), crp)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 	klog.Infof("Waiting for ClusterResourcePlacement(%s) to be synced", crp.Name)
 	gomega.Eventually(func() error {
@@ -194,31 +204,34 @@ func WaitConditionClusterResourcePlacement(cluster framework.Cluster, crp *v1alp
 func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) {
 	ginkgo.By(fmt.Sprintf("Deleting ClusterResourcePlacement(%s)", crp.Name), func() {
 		err := cluster.KubeClient.Delete(context.TODO(), crp)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 }
 
 // WaitWork waits for Work to be present on the hub cluster.
-func WaitWork(cluster framework.Cluster, workName, workNamespace string) {
-	var work workapi.Work
+func WaitWork(ctx context.Context, cluster framework.Cluster, workName, workNamespace string) {
+	name := types.NamespacedName{Name: workName, Namespace: workNamespace}
+
 	klog.Infof("Waiting for Work(%s/%s) to be synced", workName, workNamespace)
 	gomega.Eventually(func() error {
-		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: workNamespace}, &work)
-		return err
-	}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
+		var work workapi.Work
+
+		return cluster.KubeClient.Get(ctx, name, &work)
+	}, PollTimeout, PollInterval).Should(gomega.Succeed(), "Work %s not synced", name)
 }
 
 // CreateNamespace creates a namespace and waits for it to exist.
 func CreateNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
 	ginkgo.By(fmt.Sprintf("Creating Namespace(%s)", ns.Name), func() {
 		err := cluster.KubeClient.Create(context.TODO(), ns)
-		gomega.Expect(err).Should(gomega.Succeed())
+		gomega.Expect(err).Should(gomega.Succeed(), "Failed to create namespace %s", ns.Name)
 	})
 	klog.Infof("Waiting for Namespace(%s) to be synced", ns.Name)
 	gomega.Eventually(func() error {
 		err := cluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: ns.Name, Namespace: ""}, ns)
+
 		return err
-	}, PollTimeout, PollInterval).ShouldNot(gomega.HaveOccurred())
+	}, PollTimeout, PollInterval).Should(gomega.Succeed())
 }
 
 // DeleteNamespace deletes a namespace.
@@ -226,7 +239,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
 	ginkgo.By(fmt.Sprintf("Deleting Namespace(%s)", ns.Name), func() {
 		err := cluster.KubeClient.Delete(context.TODO(), ns)
 		if err != nil && !apierrors.IsNotFound(err) {
-			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+			gomega.Expect(err).Should(gomega.Succeed())
 		}
 	})
 }
@@ -235,7 +248,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) {
 func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) {
 	ginkgo.By(fmt.Sprintf("Creating ServiceAccount(%s)", sa.Name), func() {
 		err := cluster.KubeClient.Create(context.TODO(), sa)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 }
 
@@ -243,6 +256,50 @@ func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount)
 func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) {
 	ginkgo.By(fmt.Sprintf("Deleting ServiceAccount(%s)", sa.Name), func() {
 		err := cluster.KubeClient.Delete(context.TODO(), sa)
-		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		gomega.Expect(err).Should(gomega.Succeed())
 	})
 }
+
+// CreateWork creates a Work object from the given manifests.
+func CreateWork(ctx context.Context, hubCluster framework.Cluster, workName, workNamespace string, manifests []workapi.Manifest) workapi.Work {
+	work := workapi.Work{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      workName,
+			Namespace: workNamespace,
+		},
+		Spec: workapi.WorkSpec{
+			Workload: workapi.WorkloadTemplate{
+				Manifests: manifests,
+			},
+		},
+	}
+
+	err := hubCluster.KubeClient.Create(ctx, &work)
+	gomega.Expect(err).Should(gomega.Succeed(), "Failed to create work %s in namespace %v", workName, workNamespace)
+	return work
+}
+
+// DeleteWork deletes all works used in the current test.
+func DeleteWork(ctx context.Context, hubCluster framework.Cluster, works []workapi.Work) {
+	// Using the index instead of the work object itself due to the lint check "Implicit memory aliasing in for loop."
+	for i := range works {
+		gomega.Expect(hubCluster.KubeClient.Delete(ctx, &works[i])).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{}), "Deletion of work %s failed", works[i].Name)
+	}
+}
+
+// AddManifests adds manifests to be included within a Work.
+func AddManifests(objects []runtime.Object, manifests []workapi.Manifest) []workapi.Manifest {
+	for _, obj := range objects {
+		rawObj, err := json.Marshal(obj)
+		gomega.Expect(err).Should(gomega.Succeed(), "Failed to marshal object %+v", obj)
+		manifests = append(manifests, workapi.Manifest{
+			RawExtension: runtime.RawExtension{Object: obj, Raw: rawObj},
+		})
+	}
+	return manifests
+}
+
+// RandomWorkName creates a random work name in the correct format for e2e tests.
+func RandomWorkName(length int) string {
+	return "work" + rand.String(length)
+}
diff --git a/test/e2e/work_api_test_utils.go b/test/e2e/utils/work_api_test_utils.go
similarity index 69%
rename from test/e2e/work_api_test_utils.go
rename to test/e2e/utils/work_api_test_utils.go
index c15cac3e2..e69932faf 100644
--- a/test/e2e/work_api_test_utils.go
+++ b/test/e2e/utils/work_api_test_utils.go
@@ -3,35 +3,30 @@ Copyright (c) Microsoft Corporation.
 Licensed under the MIT license.
 */
 
-package e2e
+package utils
 
 import (
 	"context"
-	"embed"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/rand"
 	workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
 
 	"go.goms.io/fleet/test/e2e/framework"
 )
 
-var (
-	//go:embed manifests
-	testManifestFiles embed.FS
-)
+// Deprecated: The functions in this file will be removed or moved to helper.go.
 
-type manifestDetails struct {
+type ManifestDetails struct {
 	Manifest workapi.Manifest
 	GVK      *schema.GroupVersionKind
 	GVR      *schema.GroupVersionResource
 	ObjMeta  metav1.ObjectMeta
 }
 
-func createWorkObj(workName string, workNamespace string, manifestDetails []manifestDetails) *workapi.Work {
+func CreateWorkObj(workName string, workNamespace string, manifestDetails []ManifestDetails) *workapi.Work {
 	work := &workapi.Work{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      workName,
@@ -46,22 +41,22 @@ func createWorkObj(workName string, workNamespace string, manifestDetails []mani
 	return work
 }
 
-func createWork(work *workapi.Work, hubCluster *framework.Cluster) error {
+func CreateWorkOld(work *workapi.Work, hubCluster *framework.Cluster) error {
 	return hubCluster.KubeClient.Create(context.Background(), work)
 }
 
-func decodeUnstructured(manifest workapi.Manifest) (*unstructured.Unstructured, error) {
+func DecodeUnstructured(manifest workapi.Manifest) (*unstructured.Unstructured, error) {
 	unstructuredObj := &unstructured.Unstructured{}
 	err := unstructuredObj.UnmarshalJSON(manifest.Raw)
 
 	return unstructuredObj, err
 }
 
-func deleteWorkResource(work *workapi.Work, hubCluster *framework.Cluster) error {
+func DeleteWorkResource(work *workapi.Work, hubCluster *framework.Cluster) error {
 	return hubCluster.KubeClient.Delete(context.Background(), work)
 }
 
-func retrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluster) (*workapi.AppliedWork, error) {
+func RetrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluster) (*workapi.AppliedWork, error) {
 	retrievedAppliedWork := workapi.AppliedWork{}
 	err := memberCluster.KubeClient.Get(context.Background(), types.NamespacedName{Name: appliedWorkName}, &retrievedAppliedWork)
 	if err != nil {
@@ -71,30 +66,24 @@ func retrieveAppliedWork(appliedWorkName string, memberCluster *framework.Cluste
 	return &retrievedAppliedWork, nil
 }
 
-func retrieveWork(workNamespace string, workName string, hubCluster *framework.Cluster) (*workapi.Work, error) {
+func RetrieveWork(workNamespace string, workName string, hubCluster *framework.Cluster) (*workapi.Work, error) {
 	workRetrieved := workapi.Work{}
 	err := hubCluster.KubeClient.Get(context.Background(), types.NamespacedName{Namespace: workNamespace, Name: workName}, &workRetrieved)
 	if err != nil {
-		println("err still exists")
-		println(err.Error())
 		return nil, err
 	}
 	return &workRetrieved, nil
 }
 
-func updateWork(work *workapi.Work, hubCluster *framework.Cluster) (*workapi.Work, error) {
+func UpdateWork(work *workapi.Work, hubCluster *framework.Cluster) (*workapi.Work, error) {
 	err := hubCluster.KubeClient.Update(context.Background(), work)
 	if err != nil {
 		return nil, err
 	}
 
-	updatedWork, err := retrieveWork(work.Namespace, work.Name, hubCluster)
+	updatedWork, err := RetrieveWork(work.Namespace, work.Name, hubCluster)
 	if err != nil {
 		return nil, err
 	}
 
 	return updatedWork, err
 }
-
-func getWorkName(length int) string {
-	return "work" + rand.String(length)
-}
diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go
new file mode 100644
index 000000000..633cecfe2
--- /dev/null
+++ b/test/e2e/work_api_e2e_test.go
@@ -0,0 +1,181 @@
+package e2e
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
+
+	"go.goms.io/fleet/pkg/utils"
+	testutils "go.goms.io/fleet/test/e2e/utils"
+)
+
+// TODO: when join/leave logic is connected to work-api, join the Hub and Member for this test.
+var _ = Describe("Work API Controller test", func() {
+
+	const (
+		conditionTypeApplied = "Applied"
+		specHashAnnotation   = "fleet.azure.com/spec-hash"
+	)
+
+	var (
+		ctx context.Context
+
+		// Includes all works applied to the hub cluster. Used for garbage collection.
+		works []workapi.Work
+
+		// Comparison Options
+		cmpOptions = []cmp.Option{
+			cmpopts.IgnoreFields(workapi.AppliedResourceMeta{}, "UID"),
+			cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime", "ObservedGeneration"),
+			cmpopts.IgnoreFields(metav1.OwnerReference{}, "BlockOwnerDeletion"),
+			cmpopts.IgnoreFields(workapi.ResourceIdentifier{}, "Ordinal"),
+		}
+
+		resourceNamespace *corev1.Namespace
+	)
+
+	BeforeEach(func() {
+		ctx = context.Background()
+
+		// This namespace in MemberCluster will store specified test resources created from the Work-api.
+		resourceNamespaceName := "resource-namespace" + utils.RandStr()
+		resourceNamespace = testutils.NewNamespace(resourceNamespaceName)
+		testutils.CreateNamespace(*MemberCluster, resourceNamespace)
+
+		// Reset the works slice; works from the previous test were garbage collected in AfterEach.
+		works = []workapi.Work{}
+	})
+
+	AfterEach(func() {
+		testutils.DeleteWork(ctx, *HubCluster, works)
+		testutils.DeleteNamespace(*MemberCluster, resourceNamespace)
+	})
+
+	It("Upon successful work creation of a single resource, work manifest is applied and resource is created", func() {
+		workName := testutils.RandomWorkName(5)
+
+		By(fmt.Sprintf("Using generated work name %s", workName))
+
+		// This ConfigMap will be included in the Work object.
+		manifestConfigMapName := "work-configmap"
+		manifestConfigMap := corev1.ConfigMap{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: "v1",
+				Kind:       "ConfigMap",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      manifestConfigMapName,
+				Namespace: resourceNamespace.Name,
+			},
+			Data: map[string]string{
+				"test-key": "test-data",
+			},
+		}
+
+		// Creating types.NamespacedName to use in retrieving objects.
+		namespaceType := types.NamespacedName{Name: workName, Namespace: workNamespace.Name}
+
+		manifests := testutils.AddManifests([]runtime.Object{&manifestConfigMap}, []workapi.Manifest{})
+		By(fmt.Sprintf("Creating work %s containing %s", namespaceType, manifestConfigMapName))
+		testutils.CreateWork(ctx, *HubCluster, workName, workNamespace.Name, manifests)
+
+		By(fmt.Sprintf("Applied Condition should be set to True for Work %s", namespaceType))
+		work := workapi.Work{}
+
+		Eventually(func() string {
+			if err := HubCluster.KubeClient.Get(ctx, namespaceType, &work); err != nil {
+				return err.Error()
+			}
+
+			want := []metav1.Condition{
+				{
+					Type:   conditionTypeApplied,
+					Status: metav1.ConditionTrue,
+					Reason: "appliedWorkComplete",
+				},
+			}
+
+			return cmp.Diff(want, work.Status.Conditions, cmpOptions...)
+		}, testutils.PollTimeout, testutils.PollInterval).Should(BeEmpty(), "Validate WorkStatus mismatch (-want, +got):")
+
+		By(fmt.Sprintf("Manifest Conditions on Work object %s should be applied", namespaceType))
+		expectedManifestCondition := []workapi.ManifestCondition{
+			{
+				Conditions: []metav1.Condition{
+					{
+						Type:   conditionTypeApplied,
+						Status: metav1.ConditionTrue,
+						Reason: "appliedManifestUpdated",
+					},
+				},
+				Identifier: workapi.ResourceIdentifier{
+					Group:     manifestConfigMap.GroupVersionKind().Group,
+					Version:   manifestConfigMap.GroupVersionKind().Version,
+					Kind:      manifestConfigMap.GroupVersionKind().Kind,
+					Namespace: manifestConfigMap.Namespace,
+					Name:      manifestConfigMap.Name,
+					Resource:  "configmaps",
+				},
+			},
+		}
+
+		Expect(cmp.Diff(expectedManifestCondition, work.Status.ManifestConditions, cmpOptions...)).Should(BeEmpty(),
+			"Manifest Condition not matching for work %s (-want, +got):", namespaceType)
+
+		By(fmt.Sprintf("AppliedWorkStatus should contain the meta for the resource %s", manifestConfigMapName))
+		appliedWork := workapi.AppliedWork{}
+		Expect(MemberCluster.KubeClient.Get(ctx, namespaceType, &appliedWork)).Should(Succeed(),
+			"Retrieving AppliedWork %s failed", workName)
+
+		want := workapi.AppliedtWorkStatus{
+			AppliedResources: []workapi.AppliedResourceMeta{
+				{
+					ResourceIdentifier: workapi.ResourceIdentifier{
+						Group:     manifestConfigMap.GroupVersionKind().Group,
+						Version:   manifestConfigMap.GroupVersionKind().Version,
+						Kind:      manifestConfigMap.GroupVersionKind().Kind,
+						Namespace: manifestConfigMap.Namespace,
+						Name:      manifestConfigMap.Name,
+						Resource:  "configmaps",
+					},
+				},
+			},
+		}
+
+		Expect(cmp.Diff(want, appliedWork.Status, cmpOptions...)).Should(BeEmpty(),
+			"Validate AppliedResourceMeta mismatch (-want, +got):")
+
+		By(fmt.Sprintf("Resource %s should have been created in cluster %s", manifestConfigMapName, MemberCluster.ClusterName))
+		cm, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{})
+		Expect(err).Should(Succeed())
+		Expect(cmp.Diff(manifestConfigMap.Data, cm.Data)).Should(BeEmpty(),
+			"ConfigMap %s was not created in the cluster %s, or configMap data mismatch (-want, +got):", manifestConfigMapName, MemberCluster.ClusterName)
+
+		By(fmt.Sprintf("Validating that the resource %s is owned by the work %s", manifestConfigMapName, namespaceType))
+		configMap, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestConfigMap.Namespace).Get(ctx, manifestConfigMapName, metav1.GetOptions{})
+		Expect(err).Should(Succeed(), "Retrieving resource %s failed", manifestConfigMap.Name)
+		wantOwner := []metav1.OwnerReference{
+			{
+				APIVersion: workapi.GroupVersion.String(),
+				Kind:       workapi.AppliedWorkKind,
+				Name:       appliedWork.GetName(),
+				UID:        appliedWork.GetUID(),
+			},
+		}
+
+		Expect(cmp.Diff(wantOwner, configMap.OwnerReferences, cmpOptions...)).Should(BeEmpty(), "OwnerReference mismatch (-want, +got):")
+
+		By(fmt.Sprintf("Validating that the annotation of the resource's spec exists on the resource %s", manifestConfigMapName))
+		Expect(configMap.ObjectMeta.Annotations[specHashAnnotation]).ToNot(BeEmpty(),
+			"SpecHash Annotation does not exist for resource %s", configMap.Name)
+	})
+})
diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go
index 2641ccff6..86419bcd0 100644
--- a/test/e2e/work_api_test.go
+++ b/test/e2e/work_api_test.go
@@ -7,8 +7,6 @@ package e2e
 
 import (
 	"context"
-	"fmt"
-	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -24,33 +22,20 @@ import (
 	workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1"
 
 	fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1"
-	fleetutil "go.goms.io/fleet/pkg/utils"
+
+	"go.goms.io/fleet/test/e2e/utils"
 )
 
 const (
-	eventuallyTimeout  = 10 * time.Second
-	eventuallyInterval = 500 * time.Millisecond
+	eventuallyTimeout  = 90 // seconds
+	eventuallyInterval = 1  // seconds
 )
 
-var defaultWorkNamespace = fmt.Sprintf(fleetutil.NamespaceNameFormat, MemberCluster.ClusterName)
-
-var _ = Describe("work-api testing", Ordered, func() {
-
-	wns := &corev1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: defaultWorkNamespace,
-		},
-	}
-
-	BeforeAll(func() {
-		_, err := HubCluster.KubeClientSet.CoreV1().Namespaces().Create(context.Background(), wns, metav1.CreateOptions{})
-		Expect(err).Should(SatisfyAny(Succeed(), &fleetutil.AlreadyExistMatcher{}))
-	})
+var _ = Describe("work-api testing", func() {
 
 	Context("with a Work resource that has two manifests: Deployment & Service", func() {
 		var createdWork *workapi.Work
 		var err error
-		var mDetails []manifestDetails
+		var mDetails []utils.ManifestDetails
 
 		BeforeEach(func() {
 			mDetails = generateManifestDetails([]string{
@@ -58,28 +43,28 @@ var _ = Describe("work-api testing", Ordered, func() {
 				"manifests/test-service.yaml",
 			})
 
-			workObj := createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workObj := utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				mDetails,
 			)
 
-			err = createWork(workObj, HubCluster)
+			err = utils.CreateWorkOld(workObj, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster)
+			createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		AfterEach(func() {
-			err = deleteWorkResource(createdWork, HubCluster)
+			err = utils.DeleteWorkResource(createdWork, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		It("should have created: a respective AppliedWork, and the resources specified in the Work's manifests", func() {
 			By("verifying an AppliedWork was created")
 			Eventually(func() error {
-				_, err := retrieveAppliedWork(createdWork.Name, MemberCluster)
+				_, err := utils.RetrieveAppliedWork(createdWork.Name, MemberCluster)
 				return err
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
 
@@ -102,7 +87,7 @@ var _ = Describe("work-api testing", Ordered, func() {
 			By("verifying that corresponding conditions were created")
 			Eventually(func() bool {
-				work, err := retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
+				work, err := utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
 				if err != nil {
 					return false
 				}
@@ -116,8 +101,8 @@
 		var workOne *workapi.Work
 		var workTwo *workapi.Work
 		var err error
-		var manifestDetailsOne []manifestDetails
-		var manifestDetailsTwo []manifestDetails
+		var manifestDetailsOne []utils.ManifestDetails
+		var manifestDetailsTwo []utils.ManifestDetails
 
 		BeforeEach(func() {
 			manifestDetailsOne = generateManifestDetails([]string{
@@ -127,35 +112,35 @@
 				"manifests/test-deployment.yaml",
 			})
 
-			workOne = createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workOne = utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				manifestDetailsOne,
 			)
 
-			workTwo = createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workTwo = utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				manifestDetailsTwo)
 		})
 
 		It("should apply both works with the duplicated manifest", func() {
 			By("creating the work resources")
-			err = createWork(workOne, HubCluster)
+			err = utils.CreateWorkOld(workOne, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			err = createWork(workTwo, HubCluster)
+			err = utils.CreateWorkOld(workTwo, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
 			By("Checking the Applied Work status of each to see both are applied.")
 			Eventually(func() bool {
-				appliedWorkOne, err := retrieveAppliedWork(workOne.Name, MemberCluster)
+				appliedWorkOne, err := utils.RetrieveAppliedWork(workOne.Name, MemberCluster)
 				if err != nil {
 					return false
 				}
 
-				appliedWorkTwo, err := retrieveAppliedWork(workTwo.Name, MemberCluster)
+				appliedWorkTwo, err := utils.RetrieveAppliedWork(workTwo.Name, MemberCluster)
 				if err != nil {
 					return false
 				}
@@ -165,11 +150,11 @@
 			By("Checking the work status of each work for verification")
 			Eventually(func() bool {
-				workOne, err := retrieveWork(workOne.Namespace, workOne.Name, HubCluster)
+				workOne, err := utils.RetrieveWork(workOne.Namespace, workOne.Name, HubCluster)
 				if err != nil {
 					return false
 				}
-				workTwo, err := retrieveWork(workTwo.Namespace, workTwo.Name, HubCluster)
+				workTwo, err := utils.RetrieveWork(workTwo.Namespace, workTwo.Name, HubCluster)
 				if err != nil {
 					return false
 				}
@@ -184,12 +169,20 @@
 				err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{
 					Name:      manifestDetailsOne[0].ObjMeta.Name,
 					Namespace: manifestDetailsOne[0].ObjMeta.Namespace}, &deploy)
-				Expect(err).Should(Succeed())
+				if err != nil {
+					return 0
+				}
+				err = MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{
+					Name:      manifestDetailsTwo[0].ObjMeta.Name,
+					Namespace: manifestDetailsTwo[0].ObjMeta.Namespace}, &deploy)
+				if err != nil {
+					return 0
+				}
 				return len(deploy.OwnerReferences)
 			}, eventuallyTimeout, eventuallyInterval).Should(Equal(2))
 
 			By("delete the work two resources")
-			Expect(deleteWorkResource(workTwo, HubCluster)).To(Succeed())
+			Expect(utils.DeleteWorkResource(workTwo, HubCluster)).To(Succeed())
 
 			By("Deleting one work won't delete the manifest")
 			Eventually(func() int {
@@ -201,7 +194,7 @@
 			}, eventuallyTimeout, eventuallyInterval).Should(Equal(1))
 
 			By("delete the work one resources")
-			err = deleteWorkResource(workOne, HubCluster)
+			err = utils.DeleteWorkResource(workOne, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 			Eventually(func() bool {
 				err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{
@@ -215,33 +208,33 @@
 	Context("updating work with two newly added manifests: configmap & namespace", func() {
 		var createdWork *workapi.Work
 		var err error
-		var initialManifestDetails []manifestDetails
-		var addedManifestDetails []manifestDetails
+		var initialManifestDetails []utils.ManifestDetails
+		var addedManifestDetails []utils.ManifestDetails
 
 		BeforeEach(func() {
 			initialManifestDetails = generateManifestDetails([]string{
 				"manifests/test-secret.yaml",
 			})
 			addedManifestDetails = generateManifestDetails([]string{
-				"manifests/test-configmap.ns.yaml",
+				"manifests/test-configmap2.ns.yaml",
 				"manifests/test-namespace.yaml",
 			})
 
-			workObj := createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workObj := utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				initialManifestDetails,
 			)
 
-			err = createWork(workObj, HubCluster)
+			err = utils.CreateWorkOld(workObj, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster)
+			createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		AfterEach(func() {
-			err = deleteWorkResource(createdWork, HubCluster)
+			err = utils.DeleteWorkResource(createdWork, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
 			err = MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Delete(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.DeleteOptions{})
@@ -250,29 +243,31 @@
 		It("should have created the ConfigMap in the new namespace", func() {
 			By("retrieving the existing work and updating it by adding new manifests")
+			work := &workapi.Work{}
 			Eventually(func() error {
-				createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
-				Expect(err).ToNot(HaveOccurred())
-
-				createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest)
-				createdWork, err = updateWork(createdWork, HubCluster)
+				if work, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster); err != nil {
+					return err
+				}
+				work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest)
+				work, err = utils.UpdateWork(work, HubCluster)
 				return err
-			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
+			}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
 
-			By("checking if the ConfigMap was created in the new namespace")
+			By("checking if the new Namespace was created")
 			Eventually(func() error {
-				_, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Get(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.GetOptions{})
+				_, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{})
 				return err
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
 
-			By("checking if the new Namespace was created ")
+			By("checking if the ConfigMap was created in the new namespace")
 			Eventually(func() error {
-				_, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{})
+				_, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Get(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.GetOptions{})
 				return err
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
+
 		})
 	})
 
@@ -280,39 +275,39 @@
 		var configMap corev1.ConfigMap
 		var createdWork *workapi.Work
 		var err error
-		var manifestDetails []manifestDetails
+		var manifestDetails []utils.ManifestDetails
 		var newDataKey string
 		var newDataValue string
 
 		BeforeEach(func() {
 			manifestDetails = generateManifestDetails([]string{
-				"manifests/test-configmap.yaml",
+				"manifests/test-configmap2.yaml",
 			})
 
-			newDataKey = getWorkName(5)
-			newDataValue = getWorkName(5)
+			newDataKey = utils.RandomWorkName(5)
+			newDataValue = utils.RandomWorkName(5)
 
-			workObj := createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workObj := utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				manifestDetails,
 			)
 
-			err = createWork(workObj, HubCluster)
+			err = utils.CreateWorkOld(workObj, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster)
+			createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		AfterEach(func() {
-			err = deleteWorkResource(createdWork, HubCluster)
+			err = utils.DeleteWorkResource(createdWork, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		It("should reapply the manifest's updated spec on the spoke cluster", func() {
 			By("retrieving the existing work and modifying the manifest")
 			Eventually(func() error {
-				createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
+				createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
 
 				// Extract and modify the ConfigMap by adding a new key value pair.
 				err = json.Unmarshal(createdWork.Spec.Workload.Manifests[0].Raw, &configMap)
@@ -321,7 +316,7 @@
 				obj, _, _ := genericCodec.Decode(rawUpdatedManifest, nil, nil)
 				createdWork.Spec.Workload.Manifests[0].Object = obj
 				createdWork.Spec.Workload.Manifests[0].Raw = rawUpdatedManifest
-				_, err = updateWork(createdWork, HubCluster)
+				_, err = utils.UpdateWork(createdWork, HubCluster)
 				return err
 			}, eventuallyTimeout, eventuallyInterval).Should(Succeed())
 
@@ -337,8 +332,8 @@
 		var appliedWork *workapi.AppliedWork
 		var createdWork *workapi.Work
 		var err error
-		var originalManifestDetails []manifestDetails
-		var replacedManifestDetails []manifestDetails
+		var originalManifestDetails []utils.ManifestDetails
+		var replacedManifestDetails []utils.ManifestDetails
 		resourcesStillExist := true
 
 		BeforeEach(func() {
@@ -346,44 +341,43 @@
 				"manifests/test-secret.yaml",
 			})
 			replacedManifestDetails = generateManifestDetails([]string{
-				"manifests/test-configmap.yaml",
+				"manifests/test-configmap2.yaml",
 			})
 
-			workObj := createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workObj := utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				originalManifestDetails,
 			)
 
-			err = createWork(workObj, HubCluster)
+			err = utils.CreateWorkOld(workObj, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster)
+			createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		AfterEach(func() {
-			err = deleteWorkResource(createdWork, HubCluster)
+			err = utils.DeleteWorkResource(createdWork, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
 		It("should have deleted the original Work's resources, and created new resources with the replaced manifests", func() {
 			By("getting the respective AppliedWork")
 			Eventually(func() int {
-				appliedWork, _ = retrieveAppliedWork(createdWork.Name, MemberCluster)
+				appliedWork, _ = utils.RetrieveAppliedWork(createdWork.Name, MemberCluster)
 
 				return len(appliedWork.Status.AppliedResources)
 			}, eventuallyTimeout, eventuallyInterval).Should(Equal(len(originalManifestDetails)))
 
 			By("updating the Work resource with replaced manifests")
 			Eventually(func() error {
-				createdWork, err = retrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
+				createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster)
 				createdWork.Spec.Workload.Manifests = nil
 				for _, mD := range replacedManifestDetails {
 					createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, mD.Manifest)
 				}
-
-				createdWork, err = updateWork(createdWork, HubCluster)
+				createdWork, err = utils.UpdateWork(createdWork, HubCluster)
 				return err
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
 
@@ -422,23 +416,23 @@
 	Context("Work deletion", func() {
 		var createdWork *workapi.Work
 		var err error
-		var manifestDetails []manifestDetails
+		var manifestDetails []utils.ManifestDetails
 
 		BeforeEach(func() {
 			manifestDetails = generateManifestDetails([]string{
 				"manifests/test-secret.yaml",
 			})
 
-			workObj := createWorkObj(
-				getWorkName(5),
-				defaultWorkNamespace,
+			workObj := utils.CreateWorkObj(
+				utils.RandomWorkName(5),
+				workNamespace.Name,
 				manifestDetails,
 			)
 
-			err = createWork(workObj, HubCluster)
+			err = utils.CreateWorkOld(workObj, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
-			createdWork, err = retrieveWork(workObj.Namespace, workObj.Name, HubCluster)
+			createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 		})
 
@@ -451,7 +445,7 @@
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
 
 			By("deleting the Work resource")
-			err = deleteWorkResource(createdWork, HubCluster)
+			err = utils.DeleteWorkResource(createdWork, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
 
 			By("verifying the resource was garbage collected")
@@ -462,21 +456,16 @@
 			}, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred())
 		})
 	})
-
-	AfterAll(func() {
-		err := HubCluster.KubeClient.Delete(context.Background(), wns)
-		Expect(err).ToNot(HaveOccurred())
-	})
 })
 
-func generateManifestDetails(manifestFiles []string) []manifestDetails {
-	details := make([]manifestDetails, 0, len(manifestFiles))
+func generateManifestDetails(manifestFiles []string) []utils.ManifestDetails {
+	details := make([]utils.ManifestDetails, 0, len(manifestFiles))
 
 	for _, file := range manifestFiles {
-		detail := manifestDetails{}
+		detail := utils.ManifestDetails{}
 
 		// Read files, create manifest
-		fileRaw, err := testManifestFiles.ReadFile(file)
+		fileRaw, err := TestManifestFiles.ReadFile(file)
 		Expect(err).ToNot(HaveOccurred())
 
 		obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil)
@@ -491,7 +480,7 @@
 				Raw: jsonObj},
 		}
 
-		unstructuredObj, err := decodeUnstructured(detail.Manifest)
+		unstructuredObj, err := utils.DecodeUnstructured(detail.Manifest)
 		Expect(err).ShouldNot(HaveOccurred())
 
 		mapping, err := MemberCluster.RestMapper.RESTMapping(unstructuredObj.GroupVersionKind().GroupKind(), unstructuredObj.GroupVersionKind().Version)
diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go
index a5d482d4d..2de436cec 100644
--- a/test/e2e/work_load_test.go
+++ b/test/e2e/work_load_test.go
@@ -7,7 +7,6 @@ package e2e
 
 import (
 	"context"
-	"fmt"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -18,24 +17,23 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 
 	"go.goms.io/fleet/apis/v1alpha1"
-	"go.goms.io/fleet/pkg/utils"
 	testutils "go.goms.io/fleet/test/e2e/utils"
 )
 
 var _ = Describe("workload orchestration testing", func() {
 	var mc *v1alpha1.MemberCluster
 	var sa *corev1.ServiceAccount
-	var memberNS *corev1.Namespace
 	var imc *v1alpha1.InternalMemberCluster
 	var cr *rbacv1.ClusterRole
 	var crp *v1alpha1.ClusterResourcePlacement
+	var ctx context.Context
 
 	BeforeEach(func() {
-		memberNS = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName))
+		ctx = context.Background()
+
 		By("prepare resources in member cluster")
 		// create testing NS in member cluster
-		testutils.CreateNamespace(*MemberCluster, memberNS)
-		sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNS.Name)
+		sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name)
 		testutils.CreateServiceAccount(*MemberCluster, sa)
 
 		By("deploy member cluster in the hub cluster")
@@ -43,7 +41,7 @@ var _ = Describe("workload orchestration testing", func() {
 		testutils.CreateMemberCluster(*HubCluster, mc)
 
 		By("check if internal member cluster is created in the hub cluster")
-		imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNS.Name)
+		imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name)
 		testutils.WaitInternalMemberCluster(*HubCluster, imc)
 
 		By("check if internal member cluster condition is updated to Joined")
@@ -53,16 +51,8 @@ var _ = Describe("workload orchestration testing", func() {
 	})
 
 	AfterEach(func() {
-		testutils.DeleteMemberCluster(*HubCluster, mc)
-		Eventually(func() bool {
-			err := HubCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS)
-			return apierrors.IsNotFound(err)
-		}, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true))
-		testutils.DeleteNamespace(*MemberCluster, memberNS)
-		Eventually(func() bool {
-			err := MemberCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: memberNS.Name, Namespace: ""}, memberNS)
-			return apierrors.IsNotFound(err)
-		}, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true))
+		testutils.DeleteMemberCluster(ctx, *HubCluster, mc)
+		testutils.DeleteServiceAccount(*MemberCluster, sa)
 	})
 
 	It("Apply CRP and check if work gets propagated", func() {
@@ -104,7 +94,7 @@ var _ = Describe("workload orchestration testing", func() {
 		testutils.CreateClusterResourcePlacement(*HubCluster, crp)
 
 		By("check if work gets created for cluster resource placement")
-		testutils.WaitWork(*HubCluster, workName, memberNS.Name)
+		testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name)
 
 		By("check if cluster resource placement is updated to Scheduled & Applied")
 		testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, 3*testutils.PollTimeout)