diff --git a/charts/hub-agent/values.yaml b/charts/hub-agent/values.yaml
index d3ef28cd7..0f3884609 100644
--- a/charts/hub-agent/values.yaml
+++ b/charts/hub-agent/values.yaml
@@ -8,9 +8,9 @@ image:
   repository: ghcr.io/azure/fleet/hub-agent
   pullPolicy: Always
   # Overrides the image tag whose default is the chart appVersion.
-  tag: v0.1.0
+  tag: main
 
-logVerbosity: 2
+logVerbosity: 5
 
 enableWebhook: false
 
diff --git a/charts/member-agent/values.yaml b/charts/member-agent/values.yaml
index 2c7d6fcea..2071c9a86 100644
--- a/charts/member-agent/values.yaml
+++ b/charts/member-agent/values.yaml
@@ -3,14 +3,14 @@ replicaCount: 1
 image:
   repository: ghcr.io/azure/fleet/member-agent
   pullPolicy: Always
-  tag: v0.1.0
+  tag: main
 
-logVerbosity: 3
+logVerbosity: 5
 
 refreshtoken:
   repository: ghcr.io/azure/fleet/refresh-token
   pullPolicy: Always
-  tag: v0.1.0
+  tag: main
 
 resources:
   limits:
diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector.go b/pkg/controllers/clusterresourceplacement/cluster_selector.go
index d1fe72a35..be82fd6f6 100644
--- a/pkg/controllers/clusterresourceplacement/cluster_selector.go
+++ b/pkg/controllers/clusterresourceplacement/cluster_selector.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
@@ -43,7 +44,19 @@ func (r *Reconciler) selectClusters(placement *fleetv1alpha1.ClusterResourcePlac
 		klog.V(4).InfoS("use the cluster names provided as the list of cluster we select",
 			"placement", placement.Name, "clusters", placement.Spec.Policy.ClusterNames)
 		// TODO: filter by cluster health
-		return placement.Spec.Policy.ClusterNames, nil
+		var selectedClusters []string
+		for _, clusterName := range placement.Spec.Policy.ClusterNames {
+			_, err = r.InformerManager.Lister(utils.MemberClusterGVR).Get(clusterName)
+			if err != nil {
+				klog.ErrorS(err, "cannot get the cluster", "clusterName", clusterName)
+				if !apierrors.IsNotFound(err) {
+					return nil, err
+				}
+			} else {
+				selectedClusters = append(selectedClusters, clusterName)
+			}
+		}
+		return selectedClusters, nil
 	}
 
 	// no Affinity or ClusterAffinity set
diff --git a/pkg/controllers/clusterresourceplacement/placement_controller.go b/pkg/controllers/clusterresourceplacement/placement_controller.go
index d6a587c90..1b6d2d562 100644
--- a/pkg/controllers/clusterresourceplacement/placement_controller.go
+++ b/pkg/controllers/clusterresourceplacement/placement_controller.go
@@ -12,7 +12,6 @@ import (
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -66,9 +65,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 
 	placementOld, err := r.getPlacement(name)
 	if err != nil {
-		if !apierrors.IsNotFound(err) {
-			klog.ErrorS(err, "Failed to get the cluster resource placementOld in hub agent", "placement", name)
-		}
+		klog.ErrorS(err, "Failed to get the cluster resource placement in hub", "placement", name)
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 	placeRef := klog.KObj(placementOld)
@@ -91,6 +88,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		klog.ErrorS(scheduleErr, "Failed to select the clusters", "placement", placeRef)
 		r.updatePlacementScheduledCondition(placementOld, scheduleErr)
 		_ = r.Client.Status().Update(ctx, placementOld, client.FieldOwner(utils.PlacementFieldManagerName))
+		// TODO: check on certain error (i.e. not cluster scoped) and do not retry
 		return ctrl.Result{}, scheduleErr
 	}
 	if len(selectedClusters) == 0 {
diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go
index d3f516f66..b91a811d3 100644
--- a/pkg/controllers/clusterresourceplacement/resource_selector.go
+++ b/pkg/controllers/clusterresourceplacement/resource_selector.go
@@ -79,7 +79,6 @@ func (r *Reconciler) gatherSelectedResource(ctx context.Context, placement *flee
 			objs, err = r.fetchClusterScopedResources(ctx, selector, placement.GetName())
 		}
 		if err != nil {
-			// TODO: revisit if return partial result makes sense
 			return nil, errors.Wrapf(err, "selector = %v", selector)
 		}
 		resources = append(resources, objs...)
@@ -116,8 +115,11 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 		return nil, errors.Wrap(err, "Failed to get GVR of the selector")
 	}
 	gvr := restMapping.Resource
+	if !r.InformerManager.IsClusterScopedResources(gvr) {
+		return nil, errors.New(fmt.Sprintf("%+v is not a cluster scoped resource", restMapping.Resource))
+	}
 	if !r.InformerManager.IsInformerSynced(gvr) {
-		return nil, fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource)
+		return nil, errors.New(fmt.Sprintf("informer cache for %+v is not synced yet", restMapping.Resource))
 	}
 	lister := r.InformerManager.Lister(gvr)
 
@@ -125,7 +127,8 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 	if len(selector.Name) != 0 {
 		obj, err := lister.Get(selector.Name)
 		if err != nil {
-			return nil, client.IgnoreNotFound(errors.Wrap(err, "cannot get the objets"))
+			klog.ErrorS(err, "cannot get the resource", "gvr", gvr, "name", selector.Name)
+			return nil, client.IgnoreNotFound(err)
 		}
 		uObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 		if uObj.GetDeletionTimestamp() != nil {
@@ -217,7 +220,8 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(ctx context.Context, namesp
 	// select the namespace object itself
 	obj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(namespaceName)
 	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get the namespace %s object", namespaceName)
+		klog.ErrorS(err, "cannot get the namespace", "namespace", namespaceName)
+		return nil, client.IgnoreNotFound(err)
 	}
 	nameSpaceObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 	if nameSpaceObj.GetDeletionTimestamp() != nil {
@@ -285,6 +289,16 @@ func generateManifest(object *unstructured.Unstructured) (*workv1alpha1.Manifest
 	object.SetSelfLink("")
 	object.SetDeletionTimestamp(nil)
 	object.SetManagedFields(nil)
+	// remove kubectl last applied annotation if exist
+	annots := object.GetAnnotations()
+	if annots != nil {
+		delete(annots, corev1.LastAppliedConfigAnnotation)
+		if len(annots) == 0 {
+			object.SetAnnotations(nil)
+		} else {
+			object.SetAnnotations(annots)
+		}
+	}
 	// Remove all the owner references as the UID in the owner reference can't be transferred to
 	// the member clusters
 	// TODO: Establish a way to keep the ownership relation through work-api
diff --git a/test/e2e/work_api_test.go b/test/e2e/work_api_test.go
index eb10dfebf..bd5e486dd 100644
--- a/test/e2e/work_api_test.go
+++ b/test/e2e/work_api_test.go
@@ -8,6 +8,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -28,8 +29,8 @@ import (
 )
 
 const (
-	eventuallyTimeout  = 60 // seconds
-	eventuallyInterval = 1  // seconds
+	eventuallyTimeout  = 10 * time.Second
+	eventuallyInterval = 500 * time.Millisecond
 )
 
 var defaultWorkNamespace = fmt.Sprintf(fleetutil.NamespaceNameFormat, MemberCluster.ClusterName)
@@ -140,7 +141,7 @@ var _ = Describe("work-api testing", Ordered, func() {
 
 		})
 
-		It("should apply both the works with duplicated manifest", func() {
+		XIt("should apply both the works with duplicated manifest", func() {
 			By("creating the work resources")
 			err = createWork(workOne, HubCluster)
 			Expect(err).ToNot(HaveOccurred())
diff --git a/test/integration/cluster_placement_test.go b/test/integration/cluster_placement_test.go
index 5f419a7ca..3c3da70c1 100644
--- a/test/integration/cluster_placement_test.go
+++ b/test/integration/cluster_placement_test.go
@@ -161,6 +161,45 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() {
 			verifyPlacementApplyStatus(crp, metav1.ConditionTrue, clusterresourceplacement.ApplySucceededReason)
 		})
 
+		It("Test select the resources by name not found", func() {
+			crp = &fleetv1alpha1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-list-resource",
+				},
+				Spec: fleetv1alpha1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []fleetv1alpha1.ClusterResourceSelector{
+						{
+							Group:   apiextensionsv1.GroupName,
+							Version: "v1",
+							Kind:    "CustomResourceDefinition",
+							Name:    "doesnotexist",
+						},
+					},
+				},
+			}
+			Expect(k8sClient.Create(ctx, crp)).Should(Succeed())
+			By("Select named resource clusterResourcePlacement created")
+
+			// verify that we have created work objects that contain the resource selected
+			waitForPlacementScheduled(crp.GetName())
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+			verifyPlacementScheduleStatus(crp, 0, 0, metav1.ConditionFalse)
+
+			//add a valid cluster
+			By("Select named cluster clusterResourcePlacement updated")
+			crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, fleetv1alpha1.ClusterResourceSelector{
+				Group:   rbacv1.GroupName,
+				Version: "v1",
+				Kind:    ClusterRoleKind,
+				Name:    "test-cluster-role",
+			})
+			Expect(k8sClient.Update(ctx, crp)).Should(Succeed())
+			By("verify that we have created work objects in the newly selected cluster")
+			verifyWorkObjects(crp, []string{ClusterRoleKind}, []*fleetv1alpha1.MemberCluster{&clusterA})
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+			verifyPlacementScheduleStatus(crp, 1, 2, metav1.ConditionTrue)
+		})
+
 		It("Test select the resources by label", func() {
 			crp = &fleetv1alpha1.ClusterResourcePlacement{
 				ObjectMeta: metav1.ObjectMeta{
@@ -203,6 +242,14 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() {
 			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
 			verifyPlacementScheduleStatus(crp, 2, 2, metav1.ConditionTrue)
 			verifyPlacementApplyStatus(crp, metav1.ConditionUnknown, clusterresourceplacement.ApplyPendingReason)
+
+			By("Mimic work apply succeeded")
+			markWorkAppliedStatusSuccess(crp, &clusterA)
+			markWorkAppliedStatusSuccess(crp, &clusterB)
+
+			waitForPlacementScheduleStopped(crp.Name)
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+			verifyPlacementApplyStatus(crp, metav1.ConditionTrue, clusterresourceplacement.ApplySucceededReason)
 		})
 
 		It("Test select all the resources in a namespace", func() {
@@ -255,10 +302,6 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() {
 			Expect(len(clusterWork.Spec.Workload.Manifests)).Should(BeIdenticalTo(len(namespacedResource) + 1))
 		})
 
-		XIt("Test some of the resources selectors does not match any resource", func() {
-
-		})
-
 		It("Test select only the propagated resources in a namespace", func() {
 			By("Create a lease resource in the namespace")
 			lease := coordv1.Lease{
@@ -723,6 +766,43 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() {
 			}, &clusterWork)).Should(utils.NotFoundMatcher{})
 		})
 
+		It("Test select named cluster does not exist", func() {
+			crp = &fleetv1alpha1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-list-cluster",
+				},
+				Spec: fleetv1alpha1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []fleetv1alpha1.ClusterResourceSelector{
+						{
+							Group:   rbacv1.GroupName,
+							Version: "v1",
+							Kind:    ClusterRoleKind,
+							Name:    "test-cluster-role",
+						},
+					},
+					Policy: &fleetv1alpha1.PlacementPolicy{
+						ClusterNames: []string{"doesnotexist"},
+					},
+				},
+			}
+			Expect(k8sClient.Create(ctx, crp)).Should(Succeed())
+			By("Select named cluster clusterResourcePlacement created")
+
+			waitForPlacementScheduled(crp.GetName())
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+			verifyPlacementScheduleStatus(crp, 0, 0, metav1.ConditionFalse)
+
+			//add a valid cluster
+			By("Select named cluster clusterResourcePlacement updated")
+			crp.Spec.Policy.ClusterNames = append(crp.Spec.Policy.ClusterNames, clusterA.Name)
+			Expect(k8sClient.Update(ctx, crp)).Should(Succeed())
+			waitForPlacementScheduled(crp.GetName())
+			By("verify that we have created work objects in the newly selected cluster")
+			verifyWorkObjects(crp, []string{ClusterRoleKind}, []*fleetv1alpha1.MemberCluster{&clusterA})
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+			verifyPlacementScheduleStatus(crp, 1, 1, metav1.ConditionTrue)
+		})
+
 		It("Test select member cluster by label with change", func() {
 			markInternalMCJoined(clusterB)
 			crp = &fleetv1alpha1.ClusterResourcePlacement{