4 changes: 2 additions & 2 deletions charts/hub-agent/values.yaml
@@ -8,9 +8,9 @@ image:
   repository: ghcr.io/azure/fleet/hub-agent
   pullPolicy: Always
   # Overrides the image tag whose default is the chart appVersion.
-  tag: v0.1.0
+  tag: main
 
-logVerbosity: 2
+logVerbosity: 5
 
 enableWebhook: false
6 changes: 3 additions & 3 deletions charts/member-agent/values.yaml
@@ -3,14 +3,14 @@ replicaCount: 1
 image:
   repository: ghcr.io/azure/fleet/member-agent
   pullPolicy: Always
-  tag: v0.1.0
+  tag: main
 
-logVerbosity: 3
+logVerbosity: 5
 
 refreshtoken:
   repository: ghcr.io/azure/fleet/refresh-token
   pullPolicy: Always
-  tag: v0.1.0
+  tag: main
 
 resources:
   limits:
15 changes: 14 additions & 1 deletion pkg/controllers/clusterresourceplacement/cluster_selector.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
@@ -43,7 +44,19 @@ func (r *Reconciler) selectClusters(placement *fleetv1alpha1.ClusterResourcePlac
 		klog.V(4).InfoS("use the cluster names provided as the list of cluster we select",
 			"placement", placement.Name, "clusters", placement.Spec.Policy.ClusterNames)
 		// TODO: filter by cluster health
-		return placement.Spec.Policy.ClusterNames, nil
+		var selectedClusters []string
+		for _, clusterName := range placement.Spec.Policy.ClusterNames {
+			_, err = r.InformerManager.Lister(utils.MemberClusterGVR).Get(clusterName)
+			if err != nil {
+				klog.ErrorS(err, "cannot get the cluster", "clusterName", clusterName)
+				if !apierrors.IsNotFound(err) {
+					return nil, err
+				}
+			} else {
+				selectedClusters = append(selectedClusters, clusterName)
+			}
+		}
+		return selectedClusters, nil
 	}
 
 	// no Affinity or ClusterAffinity set
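Aside: the new loop treats NotFound as "skip this name" while still surfacing every other lister error so the reconcile can be retried. A minimal standalone sketch of that filtering pattern (the helper name and the cache.GenericLister parameter are illustrative, not the fleet code's actual types):

package sketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"
)

// filterExisting keeps only the names present in the lister's cache.
// NotFound means the object is simply not registered, so the name is
// skipped; any other error aborts so the caller can retry later.
func filterExisting(lister cache.GenericLister, names []string) ([]string, error) {
	var selected []string
	for _, name := range names {
		if _, err := lister.Get(name); err != nil {
			if !apierrors.IsNotFound(err) {
				return nil, err
			}
			continue
		}
		selected = append(selected, name)
	}
	return selected, nil
}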
@@ -12,7 +12,6 @@ import (
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -66,9 +65,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 
 	placementOld, err := r.getPlacement(name)
 	if err != nil {
-		if !apierrors.IsNotFound(err) {
-			klog.ErrorS(err, "Failed to get the cluster resource placementOld in hub agent", "placement", name)
-		}
+		klog.ErrorS(err, "Failed to get the cluster resource placement in hub", "placement", name)
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 	placeRef := klog.KObj(placementOld)
@@ -91,6 +88,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		klog.ErrorS(scheduleErr, "Failed to select the clusters", "placement", placeRef)
 		r.updatePlacementScheduledCondition(placementOld, scheduleErr)
 		_ = r.Client.Status().Update(ctx, placementOld, client.FieldOwner(utils.PlacementFieldManagerName))
+		// TODO: check on certain error (i.e. not cluster scoped) and do not retry
 		return ctrl.Result{}, scheduleErr
 	}
 	if len(selectedClusters) == 0 {
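Aside: the simplified error path leans on controller-runtime's client.IgnoreNotFound, which returns nil when the error is a NotFound and the error unchanged otherwise, so a deleted object ends the reconcile without a requeue. A minimal sketch of the idiom against a stock type (a ConfigMap here; the fleet controller's real signature differs):

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type Reconciler struct {
	client.Client
}

// Reconcile shows the IgnoreNotFound idiom: a NotFound Get error (the
// object was already deleted) finishes the reconcile cleanly, while any
// other error is returned and retried with backoff.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var cm corev1.ConfigMap
	if err := r.Get(ctx, req.NamespacedName, &cm); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	// ... reconcile cm ...
	return ctrl.Result{}, nil
}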
22 changes: 18 additions & 4 deletions pkg/controllers/clusterresourceplacement/resource_selector.go
@@ -79,7 +79,6 @@ func (r *Reconciler) gatherSelectedResource(ctx context.Context, placement *flee
 			objs, err = r.fetchClusterScopedResources(ctx, selector, placement.GetName())
 		}
 		if err != nil {
-			// TODO: revisit if return partial result makes sense
 			return nil, errors.Wrapf(err, "selector = %v", selector)
 		}
 		resources = append(resources, objs...)
@@ -116,16 +115,20 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 		return nil, errors.Wrap(err, "Failed to get GVR of the selector")
 	}
 	gvr := restMapping.Resource
+	if !r.InformerManager.IsClusterScopedResources(gvr) {
+		return nil, errors.New(fmt.Sprintf("%+v is not a cluster scoped resource", restMapping.Resource))
+	}
 	if !r.InformerManager.IsInformerSynced(gvr) {
-		return nil, fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource)
+		return nil, errors.New(fmt.Sprintf("informer cache for %+v is not synced yet", restMapping.Resource))
 	}
 
 	lister := r.InformerManager.Lister(gvr)
 	// TODO: validator should enforce the mutual exclusiveness between the `name` and `labelSelector` fields
 	if len(selector.Name) != 0 {
 		obj, err := lister.Get(selector.Name)
 		if err != nil {
-			return nil, client.IgnoreNotFound(errors.Wrap(err, "cannot get the objets"))
+			klog.ErrorS(err, "cannot get the resource", "gvr", gvr, "name", selector.Name)
+			return nil, client.IgnoreNotFound(err)
 		}
 		uObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 		if uObj.GetDeletionTimestamp() != nil {
@@ -217,7 +220,8 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(ctx context.Context, namesp
 	// select the namespace object itself
 	obj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(namespaceName)
 	if err != nil {
-		return nil, errors.Wrapf(err, "cannot get the namespace %s object", namespaceName)
+		klog.ErrorS(err, "cannot get the namespace", "namespace", namespaceName)
+		return nil, client.IgnoreNotFound(err)
 	}
 	nameSpaceObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 	if nameSpaceObj.GetDeletionTimestamp() != nil {
@@ -285,6 +289,16 @@ func generateManifest(object *unstructured.Unstructured) (*workv1alpha1.Manifest
 	object.SetSelfLink("")
 	object.SetDeletionTimestamp(nil)
 	object.SetManagedFields(nil)
+	// remove kubectl last applied annotation if exist
+	annots := object.GetAnnotations()
+	if annots != nil {
+		delete(annots, corev1.LastAppliedConfigAnnotation)
+		if len(annots) == 0 {
+			object.SetAnnotations(nil)
+		} else {
+			object.SetAnnotations(annots)
+		}
+	}
 	// Remove all the owner references as the UID in the owner reference can't be transferred to
 	// the member clusters
 	// TODO: Establish a way to keep the ownership relation through work-api
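Aside: corev1.LastAppliedConfigAnnotation is the kubectl.kubernetes.io/last-applied-configuration key, so the new block keeps stale client-side-apply state on the hub from being shipped to member clusters. The same cleanup as a standalone helper (the helper name is illustrative):

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// stripLastApplied drops the kubectl last-applied-configuration
// annotation before an object is propagated. An emptied map is
// normalized to nil so the manifest does not carry "annotations: {}".
func stripLastApplied(obj *unstructured.Unstructured) {
	annots := obj.GetAnnotations()
	if annots == nil {
		return
	}
	delete(annots, corev1.LastAppliedConfigAnnotation)
	if len(annots) == 0 {
		obj.SetAnnotations(nil)
		return
	}
	obj.SetAnnotations(annots)
}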
7 changes: 4 additions & 3 deletions test/e2e/work_api_test.go
@@ -8,6 +8,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -28,8 +29,8 @@
 )
 
 const (
-	eventuallyTimeout  = 60 // seconds
-	eventuallyInterval = 1  // seconds
+	eventuallyTimeout  = 10 * time.Second
+	eventuallyInterval = 500 * time.Millisecond
 )
 
 var defaultWorkNamespace = fmt.Sprintf(fleetutil.NamespaceNameFormat, MemberCluster.ClusterName)
@@ -140,7 +141,7 @@ var _ = Describe("work-api testing", Ordered, func() {
 
 	})
 
-	It("should apply both the works with duplicated manifest", func() {
+	XIt("should apply both the works with duplicated manifest", func() {
 		By("creating the work resources")
 		err = createWork(workOne, HubCluster)
 		Expect(err).ToNot(HaveOccurred())
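Aside: Gomega accepts either bare numbers (interpreted as seconds) or typed time.Duration values for Eventually's timeout and polling interval, so the typed constants are unambiguous and allow the sub-second interval used here. Illustrative call site (getWorkError is a hypothetical helper, not a function in this repo):

// Polls getWorkError every 500ms and fails the spec if it still
// returns an error after 10s.
Eventually(func() error {
	return getWorkError()
}, eventuallyTimeout, eventuallyInterval).Should(Succeed())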
88 changes: 84 additions & 4 deletions test/integration/cluster_placement_test.go
@@ -161,6 +161,45 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() {
 		verifyPlacementApplyStatus(crp, metav1.ConditionTrue, clusterresourceplacement.ApplySucceededReason)
 	})
 
+	It("Test select the resources by name not found", func() {
+		crp = &fleetv1alpha1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-list-resource",
+			},
+			Spec: fleetv1alpha1.ClusterResourcePlacementSpec{
+				ResourceSelectors: []fleetv1alpha1.ClusterResourceSelector{
+					{
+						Group:   apiextensionsv1.GroupName,
+						Version: "v1",
+						Kind:    "CustomResourceDefinition",
+						Name:    "doesnotexist",
+					},
+				},
+			},
+		}
+		Expect(k8sClient.Create(ctx, crp)).Should(Succeed())
+		By("Select named resource clusterResourcePlacement created")
+
+		// verify that we have created work objects that contain the resource selected
+		waitForPlacementScheduled(crp.GetName())
+		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+		verifyPlacementScheduleStatus(crp, 0, 0, metav1.ConditionFalse)
+
+		//add a valid cluster
+		By("Select named cluster clusterResourcePlacement updated")
+		crp.Spec.ResourceSelectors = append(crp.Spec.ResourceSelectors, fleetv1alpha1.ClusterResourceSelector{
+			Group:   rbacv1.GroupName,
+			Version: "v1",
+			Kind:    ClusterRoleKind,
+			Name:    "test-cluster-role",
+		})
+		Expect(k8sClient.Update(ctx, crp)).Should(Succeed())
+		By("verify that we have created work objects in the newly selected cluster")
+		verifyWorkObjects(crp, []string{ClusterRoleKind}, []*fleetv1alpha1.MemberCluster{&clusterA})
+		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+		verifyPlacementScheduleStatus(crp, 1, 2, metav1.ConditionTrue)
+	})
+
 	It("Test select the resources by label", func() {
 		crp = &fleetv1alpha1.ClusterResourcePlacement{
 			ObjectMeta: metav1.ObjectMeta{
@@ -203,6 +242,14 @@
 		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
 		verifyPlacementScheduleStatus(crp, 2, 2, metav1.ConditionTrue)
 		verifyPlacementApplyStatus(crp, metav1.ConditionUnknown, clusterresourceplacement.ApplyPendingReason)
+
+		By("Mimic work apply succeeded")
+		markWorkAppliedStatusSuccess(crp, &clusterA)
+		markWorkAppliedStatusSuccess(crp, &clusterB)
+
+		waitForPlacementScheduleStopped(crp.Name)
+		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+		verifyPlacementApplyStatus(crp, metav1.ConditionTrue, clusterresourceplacement.ApplySucceededReason)
 	})
 
 	It("Test select all the resources in a namespace", func() {
@@ -255,10 +302,6 @@
 		Expect(len(clusterWork.Spec.Workload.Manifests)).Should(BeIdenticalTo(len(namespacedResource) + 1))
 	})
 
-	XIt("Test some of the resources selectors does not match any resource", func() {
-
-	})
-
 	It("Test select only the propagated resources in a namespace", func() {
 		By("Create a lease resource in the namespace")
 		lease := coordv1.Lease{
@@ -723,6 +766,43 @@
 		}, &clusterWork)).Should(utils.NotFoundMatcher{})
 	})
 
+	It("Test select named cluster does not exist", func() {
+		crp = &fleetv1alpha1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-list-cluster",
+			},
+			Spec: fleetv1alpha1.ClusterResourcePlacementSpec{
+				ResourceSelectors: []fleetv1alpha1.ClusterResourceSelector{
+					{
+						Group:   rbacv1.GroupName,
+						Version: "v1",
+						Kind:    ClusterRoleKind,
+						Name:    "test-cluster-role",
+					},
+				},
+				Policy: &fleetv1alpha1.PlacementPolicy{
+					ClusterNames: []string{"doesnotexist"},
+				},
+			},
+		}
+		Expect(k8sClient.Create(ctx, crp)).Should(Succeed())
+		By("Select named cluster clusterResourcePlacement created")
+
+		waitForPlacementScheduled(crp.GetName())
+		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+		verifyPlacementScheduleStatus(crp, 0, 0, metav1.ConditionFalse)
+
+		//add a valid cluster
+		By("Select named cluster clusterResourcePlacement updated")
+		crp.Spec.Policy.ClusterNames = append(crp.Spec.Policy.ClusterNames, clusterA.Name)
+		Expect(k8sClient.Update(ctx, crp)).Should(Succeed())
+		waitForPlacementScheduled(crp.GetName())
+		By("verify that we have created work objects in the newly selected cluster")
+		verifyWorkObjects(crp, []string{ClusterRoleKind}, []*fleetv1alpha1.MemberCluster{&clusterA})
+		Expect(k8sClient.Get(ctx, types.NamespacedName{Name: crp.Name}, crp)).Should(Succeed())
+		verifyPlacementScheduleStatus(crp, 1, 1, metav1.ConditionTrue)
+	})
+
 	It("Test select member cluster by label with change", func() {
 		markInternalMCJoined(clusterB)
 		crp = &fleetv1alpha1.ClusterResourcePlacement{
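Aside: the new specs lean on repo helpers such as waitForPlacementScheduled and markWorkAppliedStatusSuccess; under envtest no member agent is running, so apply status has to be stamped onto the Work objects by hand before the apply condition can flip to true. A hypothetical sketch of what a scheduled-condition wait helper can look like (the "Scheduled" condition type, the Status.Conditions shape, and the use of meta.FindStatusCondition from k8s.io/apimachinery/pkg/api/meta are assumptions for illustration, not copied from the repo):

// waitForScheduledSketch polls until the placement reports any
// "Scheduled" condition, so a spec can then assert ConditionTrue or
// ConditionFalse explicitly.
func waitForScheduledSketch(name string) {
	Eventually(func() bool {
		var crp fleetv1alpha1.ClusterResourcePlacement
		if err := k8sClient.Get(ctx, types.NamespacedName{Name: name}, &crp); err != nil {
			return false
		}
		// Condition type assumed for illustration.
		return meta.FindStatusCondition(crp.Status.Conditions, "Scheduled") != nil
	}, "10s", "250ms").Should(BeTrue())
}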