diff --git a/cmd/memberagent/main.go b/cmd/memberagent/main.go index 6ffaecedf..39d79875a 100644 --- a/cmd/memberagent/main.go +++ b/cmd/memberagent/main.go @@ -5,6 +5,7 @@ Licensed under the MIT license. package main +//goland:noinspection ALL import ( "context" "encoding/base64" @@ -198,18 +199,19 @@ func Start(ctx context.Context, hubCfg, memberConfig *rest.Config, hubOpts, memb os.Exit(1) } - if err = workcontrollers.NewApplyWorkReconciler( + // create the work controller, so we can pass it to the internal member cluster reconciler + workController := workcontrollers.NewApplyWorkReconciler( hubMgr.GetClient(), spokeDynamicClient, memberMgr.GetClient(), - restMapper, - hubMgr.GetEventRecorderFor("work_controller"), - 5, true).SetupWithManager(hubMgr); err != nil { + restMapper, hubMgr.GetEventRecorderFor("work_controller"), 5, hubOpts.Namespace) + + if err = workController.SetupWithManager(hubMgr); err != nil { klog.ErrorS(err, "unable to create controller", "controller", "Work") return err } - if err = internalmembercluster.NewReconciler(hubMgr.GetClient(), memberMgr.GetClient()).SetupWithManager(hubMgr); err != nil { + if err = internalmembercluster.NewReconciler(hubMgr.GetClient(), memberMgr.GetClient(), workController).SetupWithManager(hubMgr); err != nil { return errors.Wrap(err, "unable to create controller hub_member") } diff --git a/go.mod b/go.mod index 3c4c12854..6710be27e 100644 --- a/go.mod +++ b/go.mod @@ -96,5 +96,5 @@ replace ( golang.org/x/crypto => golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.2 // weird bug that the goland won't compile without this - sigs.k8s.io/work-api => github.com/Azure/k8s-work-api v0.4.2 + sigs.k8s.io/work-api => github.com/Azure/k8s-work-api v0.4.3 ) diff --git a/go.sum b/go.sum index 45f1431a3..cdc62a360 100644 --- a/go.sum +++ b/go.sum @@ -52,8 +52,8 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/Azure/k8s-work-api v0.4.2 h1:Kwl8pmBfiykgWws12ud80TpU9gQNveyR7zlwMutGwGc= -github.com/Azure/k8s-work-api v0.4.2/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8= +github.com/Azure/k8s-work-api v0.4.3 h1:fxwO/QZftM3CW9FNl/JTHRQmfbQPa83VwOxR0HadECk= +github.com/Azure/k8s-work-api v0.4.3/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= diff --git a/pkg/controllers/internalmembercluster/member_controller.go b/pkg/controllers/internalmembercluster/member_controller.go index 20fefb4d4..2f73a6684 100644 --- a/pkg/controllers/internalmembercluster/member_controller.go +++ b/pkg/controllers/internalmembercluster/member_controller.go @@ -21,6 +21,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/predicate" + workcontrollers "sigs.k8s.io/work-api/pkg/controllers" "go.goms.io/fleet/apis" fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" @@ -31,26 +32,35 @@ import ( type Reconciler struct { hubClient client.Client memberClient client.Client - recorder record.EventRecorder + + // the join/leave agent maintains the list of controllers in the member cluster + // so that it can make sure that all the agents on the member cluster have joined/left + // before updating the internal member cluster CR status + workController *workcontrollers.ApplyWorkReconciler + + recorder record.EventRecorder } const ( - eventReasonInternalMemberClusterHealthy = "InternalMemberClusterHealthy" - eventReasonInternalMemberClusterUnhealthy = "InternalMemberClusterUnhealthy" - eventReasonInternalMemberClusterJoined = "InternalMemberClusterJoined" - eventReasonInternalMemberClusterLeft = "InternalMemberClusterLeft" + eventReasonInternalMemberClusterHealthy = "InternalMemberClusterHealthy" + eventReasonInternalMemberClusterUnhealthy = "InternalMemberClusterUnhealthy" + eventReasonInternalMemberClusterJoined = "InternalMemberClusterJoined" + eventReasonInternalMemberClusterFailedToJoin = "InternalMemberClusterFailedToJoin" + eventReasonInternalMemberClusterFailedToLeave = "InternalMemberClusterFailedToLeave" + eventReasonInternalMemberClusterLeft = "InternalMemberClusterLeft" ) // NewReconciler creates a new reconciler for the internalMemberCluster CR -func NewReconciler(hubClient client.Client, memberClient client.Client) *Reconciler { +func NewReconciler(hubClient client.Client, memberClient client.Client, workController *workcontrollers.ApplyWorkReconciler) *Reconciler { return &Reconciler{ - hubClient: hubClient, - memberClient: memberClient, + hubClient: hubClient, + memberClient: memberClient, + workController: workController, } } func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.V(3).InfoS("Reconcile", "InternalMemberCluster", req.NamespacedName) + klog.V(2).InfoS("Reconcile", "InternalMemberCluster", req.NamespacedName) var imc fleetv1alpha1.InternalMemberCluster if err := r.hubClient.Get(ctx, req.NamespacedName, &imc); err != nil { @@ -60,6 +70,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu switch imc.Spec.State { case fleetv1alpha1.ClusterStateJoin: + if err := r.startAgents(ctx, &imc); err != nil { + return ctrl.Result{}, err + } updateMemberAgentHeartBeat(&imc) updateHealthErr := r.updateHealth(ctx, &imc) r.markInternalMemberClusterJoined(&imc) @@ -74,6 +87,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{RequeueAfter: time.Second * time.Duration(imc.Spec.HeartbeatPeriodSeconds)}, nil case fleetv1alpha1.ClusterStateLeave: + if err := r.stopAgents(ctx, &imc); err != nil { + return ctrl.Result{}, err + } r.markInternalMemberClusterLeft(&imc) if err := r.updateInternalMemberClusterWithRetry(ctx, &imc); err != nil { klog.ErrorS(err, "failed to update status for %s", klog.KObj(&imc)) @@ -87,9 +103,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu } } +// startAgents start all the member agents running on the member cluster +func (r *Reconciler) startAgents(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error { + // TODO: handle all the controllers uniformly if we have more + if err := r.workController.Join(ctx); err != nil { + r.markInternalMemberClusterJoinFailed(imc, err) + // ignore the update error since we will return an error anyway + _ = r.updateInternalMemberClusterWithRetry(ctx, imc) + return err + } + return nil +} + +// stopAgents stops all the member agents running on the member cluster +func (r *Reconciler) stopAgents(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error { + // TODO: handle all the controllers uniformly if we have more + if err := r.workController.Leave(ctx); err != nil { + r.markInternalMemberClusterLeaveFailed(imc, err) + // ignore the update error since we will return an error anyway + _ = r.updateInternalMemberClusterWithRetry(ctx, imc) + return err + } + return nil +} + // updateHealth collects and updates member cluster resource stats and set ConditionTypeInternalMemberClusterHealth. func (r *Reconciler) updateHealth(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error { - klog.V(3).InfoS("updateHealth", "InternalMemberCluster", klog.KObj(imc)) + klog.V(2).InfoS("updateHealth", "InternalMemberCluster", klog.KObj(imc)) if err := r.updateResourceStats(ctx, imc); err != nil { r.markInternalMemberClusterUnhealthy(imc, errors.Wrapf(err, "failed to update resource stats %s", klog.KObj(imc))) @@ -102,7 +142,7 @@ func (r *Reconciler) updateHealth(ctx context.Context, imc *fleetv1alpha1.Intern // updateResourceStats collects and updates resource usage stats of the member cluster. func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error { - klog.V(5).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc)) var nodes corev1.NodeList if err := r.memberClient.List(ctx, &nodes); err != nil { return errors.Wrapf(err, "failed to list nodes for member cluster %s", klog.KObj(imc)) @@ -132,7 +172,7 @@ func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1 // updateInternalMemberClusterWithRetry updates InternalMemberCluster status. func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error { - klog.V(5).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc)) backOffPeriod := retry.DefaultBackoff backOffPeriod.Cap = time.Second * time.Duration(imc.Spec.HeartbeatPeriodSeconds) @@ -147,7 +187,7 @@ func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, i // updateMemberAgentHeartBeat is used to update member agent heart beat for Internal member cluster. func updateMemberAgentHeartBeat(imc *fleetv1alpha1.InternalMemberCluster) { - klog.V(5).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc)) desiredAgentStatus := imc.GetAgentStatus(fleetv1alpha1.MemberAgent) if desiredAgentStatus != nil { desiredAgentStatus.LastReceivedHeartbeat = metav1.Now() @@ -155,7 +195,7 @@ func updateMemberAgentHeartBeat(imc *fleetv1alpha1.InternalMemberCluster) { } func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentObj) { - klog.V(5).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc)) newCondition := metav1.Condition{ Type: string(fleetv1alpha1.AgentHealthy), Status: metav1.ConditionTrue, @@ -174,7 +214,7 @@ func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentO } func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgentObj, err error) { - klog.V(5).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc)) newCondition := metav1.Condition{ Type: string(fleetv1alpha1.AgentHealthy), Status: metav1.ConditionFalse, @@ -194,7 +234,7 @@ func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgen } func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentObj) { - klog.V(5).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc)) newCondition := metav1.Condition{ Type: string(fleetv1alpha1.AgentJoined), Status: metav1.ConditionTrue, @@ -213,8 +253,28 @@ func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentOb imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition) } +func (r *Reconciler) markInternalMemberClusterJoinFailed(imc apis.ConditionedAgentObj, err error) { + klog.V(4).InfoS("markInternalMemberCluster join failed", "error", err, "InternalMemberCluster", klog.KObj(imc)) + newCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionUnknown, + Reason: eventReasonInternalMemberClusterFailedToJoin, + Message: err.Error(), + ObservedGeneration: imc.GetGeneration(), + } + + // Joined status changed. + existingCondition := imc.GetConditionWithType(fleetv1alpha1.MemberAgent, newCondition.Type) + if existingCondition == nil || existingCondition.ObservedGeneration != imc.GetGeneration() || existingCondition.Status != newCondition.Status { + r.recorder.Event(imc, corev1.EventTypeNormal, eventReasonInternalMemberClusterFailedToJoin, "internal member cluster failed to join") + klog.ErrorS(err, "agent join failed", "InternalMemberCluster", klog.KObj(imc)) + } + + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition) +} + func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj) { - klog.V(5).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc)) + klog.V(4).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc)) newCondition := metav1.Condition{ Type: string(fleetv1alpha1.AgentJoined), Status: metav1.ConditionFalse, @@ -233,6 +293,26 @@ func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj) imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition) } +func (r *Reconciler) markInternalMemberClusterLeaveFailed(imc apis.ConditionedAgentObj, err error) { + klog.V(4).InfoS("markInternalMemberCluster leave failed", "error", err, "InternalMemberCluster", klog.KObj(imc)) + newCondition := metav1.Condition{ + Type: string(fleetv1alpha1.AgentJoined), + Status: metav1.ConditionUnknown, + Reason: eventReasonInternalMemberClusterFailedToLeave, + Message: err.Error(), + ObservedGeneration: imc.GetGeneration(), + } + + // Joined status changed. + existingCondition := imc.GetConditionWithType(fleetv1alpha1.MemberAgent, newCondition.Type) + if existingCondition == nil || existingCondition.ObservedGeneration != imc.GetGeneration() || existingCondition.Status != newCondition.Status { + r.recorder.Event(imc, corev1.EventTypeNormal, eventReasonInternalMemberClusterFailedToLeave, "internal member cluster failed to leave") + klog.ErrorS(err, "agent leave failed", "InternalMemberCluster", klog.KObj(imc)) + } + + imc.SetConditionsWithType(fleetv1alpha1.MemberAgent, newCondition) +} + // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { r.recorder = mgr.GetEventRecorderFor("InternalMemberClusterController") diff --git a/pkg/controllers/internalmembercluster/member_controller_integration_test.go b/pkg/controllers/internalmembercluster/member_controller_integration_test.go index 78b376e48..766a9b755 100644 --- a/pkg/controllers/internalmembercluster/member_controller_integration_test.go +++ b/pkg/controllers/internalmembercluster/member_controller_integration_test.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + workcontrollers "sigs.k8s.io/work-api/pkg/controllers" "go.goms.io/fleet/apis/v1alpha1" "go.goms.io/fleet/pkg/utils" @@ -57,7 +58,9 @@ var _ = Describe("Test Internal Member Cluster Controller", func() { } By("create the internalMemberCluster reconciler") - r = NewReconciler(k8sClient, k8sClient) + workController := workcontrollers.NewApplyWorkReconciler( + k8sClient, nil, k8sClient, nil, nil, 5, memberClusterNamespace) + r = NewReconciler(k8sClient, k8sClient, workController) err := r.SetupWithManager(mgr) Expect(err).ToNot(HaveOccurred()) }) diff --git a/pkg/controllers/internalmembercluster/member_suite_test.go b/pkg/controllers/internalmembercluster/member_suite_test.go index 49784590c..1c7914188 100644 --- a/pkg/controllers/internalmembercluster/member_suite_test.go +++ b/pkg/controllers/internalmembercluster/member_suite_test.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" "go.goms.io/fleet/apis/v1alpha1" ) @@ -60,6 +61,9 @@ var _ = BeforeSuite(func() { err = v1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = workv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + //+kubebuilder:scaffold:scheme By("construct the k8s client") k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) diff --git a/pkg/controllers/memberclusterplacement/membercluster_controller.go b/pkg/controllers/memberclusterplacement/membercluster_controller.go index cf58bbdf0..d32d1c23e 100644 --- a/pkg/controllers/memberclusterplacement/membercluster_controller.go +++ b/pkg/controllers/memberclusterplacement/membercluster_controller.go @@ -63,7 +63,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct klog.ErrorS(err, "failed to convert a cluster resource placement", "memberCluster", memberClusterName, "crp", uObj.GetName()) return ctrl.Result{}, err } - if matchPlacement(&placement, mObj.(*unstructured.Unstructured).DeepCopy()) { + if mObj == nil { + // This is a corner case that the member cluster is deleted before we handle its status change. We can't use match since we don't have its label. + klog.V(3).InfoS("enqueue a placement to reconcile for a deleted member cluster", "memberCluster", memberClusterName, "placement", klog.KObj(&placement)) + r.PlacementController.Enqueue(crpList[i]) + } else if matchPlacement(&placement, mObj.(*unstructured.Unstructured).DeepCopy()) { klog.V(3).InfoS("enqueue a placement to reconcile", "memberCluster", memberClusterName, "placement", klog.KObj(&placement)) r.PlacementController.Enqueue(crpList[i]) } diff --git a/test/e2e/README.md b/test/e2e/README.md index 145b428e6..3d56a755f 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -25,6 +25,7 @@ make run-e2e ``` or test manually ```shell +kubectl --context=kind-hub-testing delete ns local-path-storage kubectl --context=kind-hub-testing apply -f examples/fleet_v1alpha1_membercluster.yaml kubectl --context=kind-hub-testing apply -f test/integration/manifests/resources kubectl --context=kind-hub-testing apply -f test/integration/manifests/resources @@ -46,4 +47,5 @@ kubectl --context=kind-member-testing -n fleet-system get pod 5.uninstall the resources ```shell make uninstall-helm +make clean-e2e-tests ``` \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index d3c55336a..bf8ca16fc 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -14,7 +14,6 @@ import ( . "github.com/onsi/gomega" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1" @@ -39,10 +38,6 @@ var ( // This namespace in HubCluster will store v1alpha1.Work to simulate Work-related features in Hub Cluster. workNamespace = testutils.NewNamespace(fmt.Sprintf(utils.NamespaceNameFormat, MemberCluster.ClusterName)) - // Used to decode an unstructured object. - genericCodecs = serializer.NewCodecFactory(scheme) - genericCodec = genericCodecs.UniversalDeserializer() - //go:embed manifests TestManifestFiles embed.FS ) diff --git a/test/e2e/join_leave_member_test.go b/test/e2e/join_leave_member_test.go index 5b53e28d7..0f4da179a 100644 --- a/test/e2e/join_leave_member_test.go +++ b/test/e2e/join_leave_member_test.go @@ -8,10 +8,11 @@ import ( "context" . "github.com/onsi/ginkgo/v2" - "go.goms.io/fleet/apis/v1alpha1" - testutils "go.goms.io/fleet/test/e2e/utils" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "go.goms.io/fleet/apis/v1alpha1" + testutils "go.goms.io/fleet/test/e2e/utils" ) var _ = Describe("Join/leave member cluster testing", func() { diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go index dc9f5d37d..b0632fc49 100644 --- a/test/e2e/utils/helper.go +++ b/test/e2e/utils/helper.go @@ -204,7 +204,7 @@ func WaitConditionClusterResourcePlacement(cluster framework.Cluster, crp *v1alp func DeleteClusterResourcePlacement(cluster framework.Cluster, crp *v1alpha1.ClusterResourcePlacement) { ginkgo.By(fmt.Sprintf("Deleting ClusterResourcePlacement(%s)", crp.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), crp) - gomega.Expect(err).Should(gomega.Succeed()) + gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{})) }) } @@ -239,7 +239,7 @@ func DeleteNamespace(cluster framework.Cluster, ns *corev1.Namespace) { ginkgo.By(fmt.Sprintf("Deleting Namespace(%s)", ns.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), ns) if err != nil && !apierrors.IsNotFound(err) { - gomega.Expect(err).Should(gomega.Succeed()) + gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{})) } }) } @@ -256,7 +256,7 @@ func CreateServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) func DeleteServiceAccount(cluster framework.Cluster, sa *corev1.ServiceAccount) { ginkgo.By(fmt.Sprintf("Delete ServiceAccount(%s)", sa.Name), func() { err := cluster.KubeClient.Delete(context.TODO(), sa) - gomega.Expect(err).Should(gomega.Succeed()) + gomega.Expect(err).Should(gomega.SatisfyAny(gomega.Succeed(), &utils.NotFoundMatcher{})) }) } diff --git a/test/e2e/work_api_e2e_test.go b/test/e2e/work_api_e2e_test.go index 633cecfe2..a1c03a811 100644 --- a/test/e2e/work_api_e2e_test.go +++ b/test/e2e/work_api_e2e_test.go @@ -18,8 +18,8 @@ import ( testutils "go.goms.io/fleet/test/e2e/utils" ) -// TODO: when join/leave logic is connected to work-api, join the Hub and Member for this test. -var _ = Describe("Work API Controller test", func() { +// TODO: enable this when join/leave logic is connected to work-api, join the Hub and Member for this test. +var _ = XDescribe("Work API Controller test", func() { const ( conditionTypeApplied = "Applied" diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go deleted file mode 100644 index 86419bcd0..000000000 --- a/test/e2e/work_api_test.go +++ /dev/null @@ -1,500 +0,0 @@ -/* -Copyright (c) Microsoft Corporation. -Licensed under the MIT license. -*/ - -package e2e - -import ( - "context" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" - workapi "sigs.k8s.io/work-api/pkg/apis/v1alpha1" - - fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1" - "go.goms.io/fleet/test/e2e/utils" -) - -const ( - eventuallyTimeout = 90 // seconds - eventuallyInterval = 1 // seconds -) - -var _ = Describe("work-api testing", func() { - - Context("with a Work resource that has two manifests: Deployment & Service", func() { - var createdWork *workapi.Work - var err error - var mDetails []utils.ManifestDetails - - BeforeEach(func() { - mDetails = generateManifestDetails([]string{ - "manifests/test-deployment.yaml", - "manifests/test-service.yaml", - }) - - workObj := utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - mDetails, - ) - - err = utils.CreateWorkOld(workObj, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - err = utils.DeleteWorkResource(createdWork, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should have created: a respective AppliedWork, and the resources specified in the Work's manifests", func() { - By("verifying an AppliedWork was created") - Eventually(func() error { - _, err := utils.RetrieveAppliedWork(createdWork.Name, MemberCluster) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("verifying a deployment was created") - Eventually(func() error { - _, err := MemberCluster.KubeClientSet.AppsV1().Deployments(mDetails[0].ObjMeta.Namespace). - Get(context.Background(), mDetails[0].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("verifying a service was created") - Eventually(func() error { - _, err := MemberCluster.KubeClientSet.CoreV1().Services(mDetails[1].ObjMeta.Namespace). - Get(context.Background(), mDetails[1].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("verifying that corresponding conditions were created") - Eventually(func() bool { - work, err := utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) - if err != nil { - return false - } - appliedCondition := meta.IsStatusConditionTrue(work.Status.Conditions, "Applied") - return appliedCondition - }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - }) - }) - - Context("with two resource for the same resource: Deployment", func() { - var workOne *workapi.Work - var workTwo *workapi.Work - var err error - var manifestDetailsOne []utils.ManifestDetails - var manifestDetailsTwo []utils.ManifestDetails - - BeforeEach(func() { - manifestDetailsOne = generateManifestDetails([]string{ - "manifests/test-deployment.yaml", - }) - manifestDetailsTwo = generateManifestDetails([]string{ - "manifests/test-deployment.yaml", - }) - - workOne = utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - manifestDetailsOne, - ) - - workTwo = utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - manifestDetailsTwo) - - }) - - It("should apply both the works with duplicated manifest", func() { - By("creating the work resources") - err = utils.CreateWorkOld(workOne, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - err = utils.CreateWorkOld(workTwo, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Checking the Applied Work status of each to see both are applied.") - Eventually(func() bool { - appliedWorkOne, err := utils.RetrieveAppliedWork(workOne.Name, MemberCluster) - if err != nil { - return false - } - - appliedWorkTwo, err := utils.RetrieveAppliedWork(workTwo.Name, MemberCluster) - if err != nil { - return false - } - - return len(appliedWorkOne.Status.AppliedResources) == 1 && len(appliedWorkTwo.Status.AppliedResources) == 1 - }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - - By("Checking the work status of each works for verification") - Eventually(func() bool { - workOne, err := utils.RetrieveWork(workOne.Namespace, workOne.Name, HubCluster) - if err != nil { - return false - } - workTwo, err := utils.RetrieveWork(workTwo.Namespace, workTwo.Name, HubCluster) - if err != nil { - return false - } - workOneCondition := meta.IsStatusConditionTrue(workOne.Status.ManifestConditions[0].Conditions, string(fleetv1alpha1.ResourcePlacementStatusConditionTypeApplied)) - workTwoCondition := meta.IsStatusConditionTrue(workTwo.Status.ManifestConditions[0].Conditions, string(fleetv1alpha1.ResourcePlacementStatusConditionTypeApplied)) - return workOneCondition && workTwoCondition - }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - - By("verifying the one resource on the spoke are owned by both appliedWork") - var deploy appsv1.Deployment - Eventually(func() int { - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ - Name: manifestDetailsOne[0].ObjMeta.Name, - Namespace: manifestDetailsOne[0].ObjMeta.Namespace}, &deploy) - if err != nil { - return 0 - } - err = MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ - Name: manifestDetailsTwo[0].ObjMeta.Name, - Namespace: manifestDetailsTwo[0].ObjMeta.Namespace}, &deploy) - if err != nil { - return 0 - } - return len(deploy.OwnerReferences) - }, eventuallyTimeout, eventuallyInterval).Should(Equal(2)) - - By("delete the work two resources") - Expect(utils.DeleteWorkResource(workTwo, HubCluster)).To(Succeed()) - - By("Delete one work wont' delete the manifest") - Eventually(func() int { - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ - Name: manifestDetailsOne[0].ObjMeta.Name, - Namespace: manifestDetailsOne[0].ObjMeta.Namespace}, &deploy) - Expect(err).Should(Succeed()) - return len(deploy.OwnerReferences) - }, eventuallyTimeout, eventuallyInterval).Should(Equal(1)) - - By("delete the work one resources") - err = utils.DeleteWorkResource(workOne, HubCluster) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - err := MemberCluster.KubeClient.Get(context.Background(), types.NamespacedName{ - Name: manifestDetailsOne[0].ObjMeta.Name, - Namespace: manifestDetailsOne[0].ObjMeta.Namespace}, &deploy) - return apierrors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - }) - }) - - Context("updating work with two newly added manifests: configmap & namespace", func() { - var createdWork *workapi.Work - var err error - var initialManifestDetails []utils.ManifestDetails - var addedManifestDetails []utils.ManifestDetails - - BeforeEach(func() { - initialManifestDetails = generateManifestDetails([]string{ - "manifests/test-secret.yaml", - }) - addedManifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap2.ns.yaml", - "manifests/test-namespace.yaml", - }) - - workObj := utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - initialManifestDetails, - ) - - err = utils.CreateWorkOld(workObj, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - err = utils.DeleteWorkResource(createdWork, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - err = MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Delete(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should have created the ConfigMap in the new namespace", func() { - By("retrieving the existing work and updating it by adding new manifests") - work := &workapi.Work{} - Eventually(func() error { - if work, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster); err != nil { - return err - } - - work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, addedManifestDetails[0].Manifest, addedManifestDetails[1].Manifest) - work, err = utils.UpdateWork(work, HubCluster) - return err - }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) - - By("checking if the new Namespace was created") - Eventually(func() error { - _, err := MemberCluster.KubeClientSet.CoreV1().Namespaces().Get(context.Background(), addedManifestDetails[1].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("checking if the ConfigMap was created in the new namespace") - Eventually(func() error { - _, err := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(addedManifestDetails[0].ObjMeta.Namespace).Get(context.Background(), addedManifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - }) - }) - - Context("work update with a modified Manifest", func() { - var configMap corev1.ConfigMap - var createdWork *workapi.Work - var err error - var manifestDetails []utils.ManifestDetails - var newDataKey string - var newDataValue string - - BeforeEach(func() { - manifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap2.yaml", - }) - newDataKey = utils.RandomWorkName(5) - newDataValue = utils.RandomWorkName(5) - - workObj := utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - manifestDetails, - ) - - err = utils.CreateWorkOld(workObj, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - err = utils.DeleteWorkResource(createdWork, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should reapply the manifest's updated spec on the spoke cluster", func() { - By("retrieving the existing work and modifying the manifest") - Eventually(func() error { - createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) - - // Extract and modify the ConfigMap by adding a new key value pair. - err = json.Unmarshal(createdWork.Spec.Workload.Manifests[0].Raw, &configMap) - configMap.Data[newDataKey] = newDataValue - rawUpdatedManifest, _ := json.Marshal(configMap) - obj, _, _ := genericCodec.Decode(rawUpdatedManifest, nil, nil) - createdWork.Spec.Workload.Manifests[0].Object = obj - createdWork.Spec.Workload.Manifests[0].Raw = rawUpdatedManifest - _, err = utils.UpdateWork(createdWork, HubCluster) - return err - }, eventuallyTimeout, eventuallyInterval).Should(Succeed()) - - By("verifying if the manifest was reapplied") - Eventually(func() bool { - configMap, _ := MemberCluster.KubeClientSet.CoreV1().ConfigMaps(manifestDetails[0].ObjMeta.Namespace).Get(context.Background(), manifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - return configMap.Data[newDataKey] == newDataValue - }, eventuallyTimeout, eventuallyInterval).Should(BeTrue()) - }) - }) - - Context("with all manifests replaced", func() { - var appliedWork *workapi.AppliedWork - var createdWork *workapi.Work - var err error - var originalManifestDetails []utils.ManifestDetails - var replacedManifestDetails []utils.ManifestDetails - resourcesStillExist := true - - BeforeEach(func() { - originalManifestDetails = generateManifestDetails([]string{ - "manifests/test-secret.yaml", - }) - replacedManifestDetails = generateManifestDetails([]string{ - "manifests/test-configmap2.yaml", - }) - - workObj := utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - originalManifestDetails, - ) - - err = utils.CreateWorkOld(workObj, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - err = utils.DeleteWorkResource(createdWork, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should have deleted the original Work's resources, and created new resources with the replaced manifests", func() { - By("getting the respective AppliedWork") - Eventually(func() int { - appliedWork, _ = utils.RetrieveAppliedWork(createdWork.Name, MemberCluster) - - return len(appliedWork.Status.AppliedResources) - }, eventuallyTimeout, eventuallyInterval).Should(Equal(len(originalManifestDetails))) - - By("updating the Work resource with replaced manifests") - Eventually(func() error { - createdWork, err = utils.RetrieveWork(createdWork.Namespace, createdWork.Name, HubCluster) - createdWork.Spec.Workload.Manifests = nil - for _, mD := range replacedManifestDetails { - createdWork.Spec.Workload.Manifests = append(createdWork.Spec.Workload.Manifests, mD.Manifest) - } - createdWork, err = utils.UpdateWork(createdWork, HubCluster) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("verifying all the initial Work owned resources were deleted") - Eventually(func() bool { - for resourcesStillExist == true { - for _, ar := range appliedWork.Status.AppliedResources { - gvr := schema.GroupVersionResource{ - Group: ar.Group, - Version: ar.Version, - Resource: ar.Resource, - } - - _, err = MemberCluster.DynamicClient.Resource(gvr).Namespace(ar.Namespace).Get(context.Background(), ar.Name, metav1.GetOptions{}) - if err != nil { - resourcesStillExist = false - } else { - resourcesStillExist = true - } - } - } - - return resourcesStillExist - }, eventuallyTimeout, eventuallyInterval).ShouldNot(BeTrue()) - - By("verifying the new manifest was applied") - Eventually(func() error { - _, err = MemberCluster.KubeClientSet.CoreV1().ConfigMaps(replacedManifestDetails[0].ObjMeta.Namespace).Get(context.Background(), replacedManifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - }) - }) - - Context("Work deletion", func() { - var createdWork *workapi.Work - var err error - var manifestDetails []utils.ManifestDetails - - BeforeEach(func() { - manifestDetails = generateManifestDetails([]string{ - "manifests/test-secret.yaml", - }) - - workObj := utils.CreateWorkObj( - utils.RandomWorkName(5), - workNamespace.Name, - manifestDetails, - ) - - err = utils.CreateWorkOld(workObj, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - createdWork, err = utils.RetrieveWork(workObj.Namespace, workObj.Name, HubCluster) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should delete the Work and verify the resource has been garbage collected", func() { - By("verifying the manifest was applied") - Eventually(func() error { - _, err = MemberCluster.KubeClientSet.CoreV1().Secrets(manifestDetails[0].ObjMeta.Namespace).Get(context.Background(), manifestDetails[0].ObjMeta.Name, metav1.GetOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - - By("deleting the Work resource") - err = utils.DeleteWorkResource(createdWork, HubCluster) - Expect(err).ToNot(HaveOccurred()) - - By("verifying the resource was garbage collected") - Eventually(func() error { - err = MemberCluster.KubeClientSet.CoreV1().Secrets(manifestDetails[0].ObjMeta.Namespace).Delete(context.Background(), manifestDetails[0].ObjMeta.Name, metav1.DeleteOptions{}) - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(HaveOccurred()) - }) - }) -}) - -func generateManifestDetails(manifestFiles []string) []utils.ManifestDetails { - details := make([]utils.ManifestDetails, 0, len(manifestFiles)) - - for _, file := range manifestFiles { - detail := utils.ManifestDetails{} - - // Read files, create manifest - fileRaw, err := TestManifestFiles.ReadFile(file) - Expect(err).ToNot(HaveOccurred()) - - obj, gvk, err := genericCodec.Decode(fileRaw, nil, nil) - Expect(err).ToNot(HaveOccurred()) - - jsonObj, err := json.Marshal(obj) - Expect(err).ToNot(HaveOccurred()) - - detail.Manifest = workapi.Manifest{ - RawExtension: runtime.RawExtension{ - Object: obj, - Raw: jsonObj}, - } - - unstructuredObj, err := utils.DecodeUnstructured(detail.Manifest) - Expect(err).ShouldNot(HaveOccurred()) - - mapping, err := MemberCluster.RestMapper.RESTMapping(unstructuredObj.GroupVersionKind().GroupKind(), unstructuredObj.GroupVersionKind().Version) - Expect(err).ShouldNot(HaveOccurred()) - - detail.GVK = gvk - detail.GVR = &mapping.Resource - detail.ObjMeta = metav1.ObjectMeta{ - Name: unstructuredObj.GetName(), - Namespace: unstructuredObj.GetNamespace(), - } - - details = append(details, detail) - } - - return details -} diff --git a/test/e2e/work_load_test.go b/test/e2e/work_load_test.go index 2de436cec..8b9c007a8 100644 --- a/test/e2e/work_load_test.go +++ b/test/e2e/work_load_test.go @@ -12,7 +12,6 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -22,47 +21,114 @@ import ( var _ = Describe("workload orchestration testing", func() { var mc *v1alpha1.MemberCluster - var sa *corev1.ServiceAccount var imc *v1alpha1.InternalMemberCluster - var cr *rbacv1.ClusterRole + var sa *corev1.ServiceAccount var crp *v1alpha1.ClusterResourcePlacement var ctx context.Context BeforeEach(func() { ctx = context.Background() - By("prepare resources in member cluster") // create testing NS in member cluster sa = testutils.NewServiceAccount(MemberCluster.ClusterName, memberNamespace.Name) testutils.CreateServiceAccount(*MemberCluster, sa) - - By("deploy member cluster in the hub cluster") - mc = testutils.NewMemberCluster(MemberCluster.ClusterName, 60, v1alpha1.ClusterStateJoin) - testutils.CreateMemberCluster(*HubCluster, mc) - - By("check if internal member cluster created in the hub cluster") - imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name) - testutils.WaitInternalMemberCluster(*HubCluster, imc) - - By("check if internal member cluster condition is updated to Joined") - testutils.WaitConditionInternalMemberCluster(*HubCluster, imc, v1alpha1.AgentJoined, v1.ConditionTrue, 3*testutils.PollTimeout) - By("check if member cluster condition is updated to Joined") - testutils.WaitConditionMemberCluster(*HubCluster, mc, v1alpha1.ConditionTypeMemberClusterJoined, v1.ConditionTrue, 3*testutils.PollTimeout) }) AfterEach(func() { + By("delete the member cluster") testutils.DeleteMemberCluster(ctx, *HubCluster, mc) testutils.DeleteServiceAccount(*MemberCluster, sa) + testutils.DeleteClusterResourcePlacement(*HubCluster, crp) }) - It("Apply CRP and check if work gets propagated", func() { - workName := "resource-label-selector" + Context("Test Workload Orchestration", func() { + BeforeEach(func() { + By("deploy member cluster in the hub cluster") + mc = testutils.NewMemberCluster(MemberCluster.ClusterName, 60, v1alpha1.ClusterStateJoin) + testutils.CreateMemberCluster(*HubCluster, mc) + + By("check if internal member cluster created in the hub cluster") + imc = testutils.NewInternalMemberCluster(MemberCluster.ClusterName, memberNamespace.Name) + testutils.WaitInternalMemberCluster(*HubCluster, imc) + + By("check if internal member cluster condition is updated to Joined") + testutils.WaitConditionInternalMemberCluster(*HubCluster, imc, v1alpha1.AgentJoined, v1.ConditionTrue, testutils.PollTimeout) + By("check if member cluster condition is updated to Joined") + testutils.WaitConditionMemberCluster(*HubCluster, mc, v1alpha1.ConditionTypeMemberClusterJoined, v1.ConditionTrue, testutils.PollTimeout) + }) + + AfterEach(func() { + By("update member cluster in the hub cluster") + testutils.UpdateMemberClusterState(*HubCluster, mc, v1alpha1.ClusterStateLeave) + + By("check if internal member cluster condition is updated to Left") + testutils.WaitConditionInternalMemberCluster(*HubCluster, imc, v1alpha1.AgentJoined, v1.ConditionFalse, testutils.PollTimeout) + + By("check if member cluster is marked as notReadyToJoin") + testutils.WaitConditionMemberCluster(*HubCluster, mc, v1alpha1.ConditionTypeMemberClusterReadyToJoin, v1.ConditionFalse, testutils.PollTimeout) + }) + + It("Apply CRP and check if work gets propagated", func() { + workName := "resource-label-selector" + labelKey := "fleet.azure.com/name" + labelValue := "test" + By("create the resources to be propagated") + cr := &rbacv1.ClusterRole{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster-role", + Labels: map[string]string{labelKey: labelValue}, + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"get", "list", "watch"}, + APIGroups: []string{""}, + Resources: []string{"secrets"}, + }, + }, + } + testutils.CreateClusterRole(*HubCluster, cr) + + By("create the cluster resource placement in the hub cluster") + crp = &v1alpha1.ClusterResourcePlacement{ + ObjectMeta: v1.ObjectMeta{Name: "resource-label-selector"}, + Spec: v1alpha1.ClusterResourcePlacementSpec{ + ResourceSelectors: []v1alpha1.ClusterResourceSelector{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + LabelSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"fleet.azure.com/name": "test"}, + }, + }, + }, + }, + } + testutils.CreateClusterResourcePlacement(*HubCluster, crp) + + By("check if work gets created for cluster resource placement") + testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) + + By("check if cluster resource placement is updated to Scheduled & Applied") + testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, testutils.PollTimeout) + testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), v1.ConditionTrue, testutils.PollTimeout) + + By("check if resource is propagated to member cluster") + testutils.WaitClusterRole(*MemberCluster, cr) + + By("delete cluster role on hub cluster") + testutils.DeleteClusterRole(*HubCluster, cr) + }) + }) + + It("Test join and leave with CRP", func() { + cprName := "join-leave-test2" labelKey := "fleet.azure.com/name" labelValue := "test" By("create the resources to be propagated") - cr = &rbacv1.ClusterRole{ + cr := &rbacv1.ClusterRole{ ObjectMeta: v1.ObjectMeta{ - Name: "test-cluster-role", + Name: "test2", Labels: map[string]string{labelKey: labelValue}, }, Rules: []rbacv1.PolicyRule{ @@ -77,7 +143,9 @@ var _ = Describe("workload orchestration testing", func() { By("create the cluster resource placement in the hub cluster") crp = &v1alpha1.ClusterResourcePlacement{ - ObjectMeta: v1.ObjectMeta{Name: "resource-label-selector"}, + ObjectMeta: v1.ObjectMeta{ + Name: cprName, + }, Spec: v1alpha1.ClusterResourcePlacementSpec{ ResourceSelectors: []v1alpha1.ClusterResourceSelector{ { @@ -85,7 +153,7 @@ var _ = Describe("workload orchestration testing", func() { Version: "v1", Kind: "ClusterRole", LabelSelector: &v1.LabelSelector{ - MatchLabels: map[string]string{"fleet.azure.com/name": "test"}, + MatchLabels: cr.Labels, }, }, }, @@ -93,22 +161,44 @@ var _ = Describe("workload orchestration testing", func() { } testutils.CreateClusterResourcePlacement(*HubCluster, crp) - By("check if work gets created for cluster resource placement") - testutils.WaitWork(ctx, *HubCluster, workName, memberNamespace.Name) + By("verify the resource is not propagated to member cluster") + Consistently(func() error { + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name, Namespace: ""}, cr) + }, testutils.PollTimeout, testutils.PollInterval).ShouldNot(Succeed()) - By("check if cluster resource placement is updated to Scheduled & Applied") - testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementConditionTypeScheduled), v1.ConditionTrue, 3*testutils.PollTimeout) - testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), v1.ConditionTrue, 3*testutils.PollTimeout) + By("add member cluster in the hub cluster") + mc = testutils.NewMemberCluster(MemberCluster.ClusterName, 60, v1alpha1.ClusterStateJoin) + testutils.CreateMemberCluster(*HubCluster, mc) - By("check if resource is propagated to member cluster") - testutils.WaitClusterRole(*MemberCluster, cr) + By("check if member cluster condition is updated to Joined") + testutils.WaitConditionMemberCluster(*HubCluster, mc, v1alpha1.ConditionTypeMemberClusterJoined, v1.ConditionTrue, testutils.PollTimeout) + + By("verify that the cluster resource placement is applied") + testutils.WaitConditionClusterResourcePlacement(*HubCluster, crp, string(v1alpha1.ResourcePlacementStatusConditionTypeApplied), v1.ConditionTrue, testutils.PollTimeout) + + By("verify the resource is propagated to member cluster") + Expect(MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name, Namespace: ""}, cr)).Should(Succeed()) + + By("mark the member cluster in the hub cluster as leave") + testutils.UpdateMemberClusterState(*HubCluster, mc, v1alpha1.ClusterStateLeave) - By("delete cluster resource placement & cluster role on hub cluster") + By("verify that member cluster is marked as notReadyToJoin") + testutils.WaitConditionMemberCluster(*HubCluster, mc, v1alpha1.ConditionTypeMemberClusterReadyToJoin, v1.ConditionFalse, testutils.PollTimeout) + + By("verify that the resource is still on the member cluster") + Consistently(func() error { + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name, Namespace: ""}, cr) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + + By("delete the crp from the hub") testutils.DeleteClusterResourcePlacement(*HubCluster, crp) - Eventually(func() bool { - err := HubCluster.KubeClient.Get(context.TODO(), types.NamespacedName{Name: crp.Name, Namespace: ""}, crp) - return apierrors.IsNotFound(err) - }, testutils.PollTimeout, testutils.PollInterval).Should(Equal(true)) + + By("verify that the resource is still on the member cluster") + Consistently(func() error { + return MemberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: cr.Name, Namespace: ""}, cr) + }, testutils.PollTimeout, testutils.PollInterval).Should(Succeed()) + + By("delete cluster role on hub cluster") testutils.DeleteClusterRole(*HubCluster, cr) }) })