From 75875375b320b538f399954aebb9de52c7606ec4 Mon Sep 17 00:00:00 2001 From: Zhiying Lin Date: Tue, 20 Jun 2023 13:15:34 +0800 Subject: [PATCH] create resource --- .../v1beta1/resourcesnapshot_types.go | 4 + .../clusterresourceplacement_controller.go | 300 ++++++-- ...lusterresourceplacement_controller_test.go | 675 +++++++++++++++++- .../resource_selector_test.go | 38 +- 4 files changed, 924 insertions(+), 93 deletions(-) diff --git a/apis/placement/v1beta1/resourcesnapshot_types.go b/apis/placement/v1beta1/resourcesnapshot_types.go index 7caf14d42..2d95b4831 100644 --- a/apis/placement/v1beta1/resourcesnapshot_types.go +++ b/apis/placement/v1beta1/resourcesnapshot_types.go @@ -21,6 +21,9 @@ const ( // NumberOfResourceSnapshotsAnnotation is the annotation that contains the total number of resource snapshots. NumberOfResourceSnapshotsAnnotation = fleetPrefix + "numberOfResourceSnapshots" + + // ResourceSnapshotNameFmt is resourcePolicySnapshot name format: {CRPName}-{resourceIndex}. + ResourceSnapshotNameFmt = "%s-%d" ) // +genclient @@ -38,6 +41,7 @@ const ( // We assign an ever-increasing index for each such group of resourceSnapshots. // The name convention of a clusterResourceSnapshot is {CRPName}-{resourceIndex}(-{subindex})* // where the name of the first snapshot of a group has no subindex part so its name is {CRPName}-{resourceIndex}. +// resourceIndex will begin with 0. // Each snapshot MUST have the following labels: // - `CRPTrackingLabel` which points to its owner CRP. // - `ResourceIndexLabel` which is the index of the snapshot group. 
diff --git a/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller.go b/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller.go index f8805f47f..4d953cc97 100644 --- a/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller.go +++ b/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller.go @@ -17,8 +17,9 @@ import ( "go.goms.io/fleet/pkg/utils/controller" ) -func (r *Reconciler) Reconcile(_ context.Context, _ controller.QueueKey) (ctrl.Result, error) { - return ctrl.Result{}, nil +func (r *Reconciler) Reconcile(ctx context.Context, _ controller.QueueKey) (ctrl.Result, error) { + // TODO workaround to bypass lint check + return r.handleUpdate(ctx, nil) } // handleUpdate handles the create/update clusterResourcePlacement event. @@ -26,20 +27,52 @@ func (r *Reconciler) Reconcile(_ context.Context, _ controller.QueueKey) (ctrl.R // clusterPolicySnapshot status and work status. // If the error type is ErrUnexpectedBehavior, the controller will skip the reconciling. 
func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement) (ctrl.Result, error) { + latestPolicySnapshot, err := r.getOrCreateClusterPolicySnapshot(ctx, crp) + if err != nil { + return ctrl.Result{}, err + } + selectedResources, err := r.selectResourcesForPlacement(crp) + if err != nil { + return ctrl.Result{}, err + } + resourceSnapshotSpec := fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + PolicySnapshotName: latestPolicySnapshot.Name, + } + _, err = r.getOrCreateClusterResourceSnapshot(ctx, crp, &resourceSnapshotSpec) + if err != nil { + return ctrl.Result{}, err + } + + // update the status based on the latestPolicySnapshot status + // update the status based on the work + return ctrl.Result{}, nil +} + +func (r *Reconciler) getOrCreateClusterPolicySnapshot(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement) (*fleetv1beta1.ClusterSchedulingPolicySnapshot, error) { crpKObj := klog.KObj(crp) schedulingPolicy := *crp.Spec.Policy // will exclude the numberOfClusters schedulingPolicy.NumberOfClusters = nil policyHash, err := generatePolicyHash(&schedulingPolicy) if err != nil { klog.ErrorS(err, "Failed to generate policy hash of crp", "clusterResourcePlacement", crpKObj) - return ctrl.Result{}, controller.NewUnexpectedBehaviorError(err) + return nil, controller.NewUnexpectedBehaviorError(err) } + // latestPolicySnapshotIndex should be -1 when there is no snapshot. latestPolicySnapshot, latestPolicySnapshotIndex, err := r.lookupLatestClusterPolicySnapshot(ctx, crp) if err != nil { - return ctrl.Result{}, err + return nil, err + } + + if latestPolicySnapshot != nil && string(latestPolicySnapshot.Spec.PolicyHash) == policyHash { + if err := r.ensureLatestPolicySnapshot(ctx, crp, latestPolicySnapshot); err != nil { + return nil, err + } + return latestPolicySnapshot, nil } + // Need to create new snapshot when 1) there is no snapshots or 2) the latest snapshot hash != current one. 
// mark the last policy snapshot as inactive if it is different from what we have now if latestPolicySnapshot != nil && string(latestPolicySnapshot.Spec.PolicyHash) != policyHash && @@ -48,59 +81,119 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster latestPolicySnapshot.Labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false) if err := r.Client.Update(ctx, latestPolicySnapshot); err != nil { klog.ErrorS(err, "Failed to set the isLatestSnapshot label to false", "clusterPolicySnapshot", klog.KObj(latestPolicySnapshot)) - return ctrl.Result{}, controller.NewAPIServerError(false, err) + return nil, controller.NewAPIServerError(false, err) } } - if latestPolicySnapshot != nil && string(latestPolicySnapshot.Spec.PolicyHash) == policyHash { - if err := r.ensureLatestPolicySnapshot(ctx, crp, latestPolicySnapshot); err != nil { - return ctrl.Result{}, err - } - } else { - // create a new policy snapshot - latestPolicySnapshotIndex++ - latestPolicySnapshot = &fleetv1beta1.ClusterSchedulingPolicySnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, crp.Name, latestPolicySnapshotIndex), - Labels: map[string]string{ - fleetv1beta1.CRPTrackingLabel: crp.Name, - fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), - fleetv1beta1.PolicyIndexLabel: strconv.Itoa(latestPolicySnapshotIndex), - }, - }, - Spec: fleetv1beta1.SchedulingPolicySnapshotSpec{ - Policy: &schedulingPolicy, - PolicyHash: []byte(policyHash), + + // create a new policy snapshot + latestPolicySnapshotIndex++ + latestPolicySnapshot = &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, crp.Name, latestPolicySnapshotIndex), + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: crp.Name, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + fleetv1beta1.PolicyIndexLabel: strconv.Itoa(latestPolicySnapshotIndex), }, + 
}, + Spec: fleetv1beta1.SchedulingPolicySnapshotSpec{ + Policy: &schedulingPolicy, + PolicyHash: []byte(policyHash), + }, + } + policySnapshotKObj := klog.KObj(latestPolicySnapshot) + if err := controllerutil.SetControllerReference(crp, latestPolicySnapshot, r.Scheme); err != nil { + klog.ErrorS(err, "Failed to set owner reference", "clusterPolicySnapshot", policySnapshotKObj) + // should never happen + return nil, controller.NewUnexpectedBehaviorError(err) + } + // make sure each policySnapshot should always have the annotation if CRP is selectN type + if crp.Spec.Policy != nil && + crp.Spec.Policy.PlacementType == fleetv1beta1.PickNPlacementType && + crp.Spec.Policy.NumberOfClusters != nil { + latestPolicySnapshot.Annotations = map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(int(*crp.Spec.Policy.NumberOfClusters)), } - policySnapshotKObj := klog.KObj(latestPolicySnapshot) - if err := controllerutil.SetControllerReference(crp, latestPolicySnapshot, r.Scheme); err != nil { - klog.ErrorS(err, "Failed to set owner reference", "clusterPolicySnapshot", policySnapshotKObj) - // should never happen - return ctrl.Result{}, controller.NewUnexpectedBehaviorError(err) + } + + if err := r.Client.Create(ctx, latestPolicySnapshot); err != nil { + klog.ErrorS(err, "Failed to create new clusterPolicySnapshot", "clusterPolicySnapshot", policySnapshotKObj) + return nil, controller.NewAPIServerError(false, err) + } + return latestPolicySnapshot, nil +} + +// TODO handle all the resources selected by placement larger than 1MB size limit of k8s objects. 
+func (r *Reconciler) getOrCreateClusterResourceSnapshot(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec) (*fleetv1beta1.ClusterResourceSnapshot, error) { + resourceHash, err := generateResourceHash(resourceSnapshotSpec) + if err != nil { + klog.ErrorS(err, "Failed to generate resource hash of crp", "clusterResourcePlacement", klog.KObj(crp)) + return nil, controller.NewUnexpectedBehaviorError(err) + } + + // latestResourceSnapshotIndex should be -1 when there is no snapshot. + latestResourceSnapshot, latestResourceSnapshotIndex, err := r.lookupLatestResourceSnapshot(ctx, crp) + if err != nil { + return nil, err + } + + latestResourceSnapshotHash := "" + if latestResourceSnapshot != nil { + latestResourceSnapshotHash, err = parseResourceGroupHashFromAnnotation(latestResourceSnapshot) + if err != nil { + klog.ErrorS(err, "Failed to get the ResourceGroupHashAnnotation", "clusterResourceSnapshot", klog.KObj(latestResourceSnapshot)) + return nil, controller.NewUnexpectedBehaviorError(err) } - // make sure each policySnapshot should always have the annotation if CRP is selectN type - if crp.Spec.Policy != nil && - crp.Spec.Policy.PlacementType == fleetv1beta1.PickNPlacementType && - crp.Spec.Policy.NumberOfClusters != nil { - latestPolicySnapshot.Annotations = map[string]string{ - fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(int(*crp.Spec.Policy.NumberOfClusters)), - } + } + + if latestResourceSnapshot != nil && latestResourceSnapshotHash == resourceHash { + if err := r.ensureLatestResourceSnapshot(ctx, latestResourceSnapshot); err != nil { + return nil, err } + return latestResourceSnapshot, nil + } - if err := r.Client.Create(ctx, latestPolicySnapshot); err != nil { - klog.ErrorS(err, "Failed to create new clusterPolicySnapshot", "clusterPolicySnapshot", policySnapshotKObj) - return ctrl.Result{}, controller.NewAPIServerError(false, err) + // Need to create new snapshot when 1) there is 
no snapshots or 2) the latest snapshot hash != current one. + // mark the last resource snapshot as inactive if it is different from what we have now + if latestResourceSnapshot != nil && + latestResourceSnapshotHash != resourceHash && + latestResourceSnapshot.Labels[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { + // set the latest label to false first to make sure there is only one or none active resource snapshot + latestResourceSnapshot.Labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false) + if err := r.Client.Update(ctx, latestResourceSnapshot); err != nil { + klog.ErrorS(err, "Failed to set the isLatestSnapshot label to false", "clusterResourceSnapshot", klog.KObj(latestResourceSnapshot)) + return nil, controller.NewAPIServerError(false, err) } } - // create clusterResourceSnapshot - // TODO - if _, err := r.selectResourcesForPlacement(crp); err != nil { - return ctrl.Result{}, err + // create a new resource snapshot + latestResourceSnapshotIndex++ + latestResourceSnapshot = &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, crp.Name, latestResourceSnapshotIndex), + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: crp.Name, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + fleetv1beta1.ResourceIndexLabel: strconv.Itoa(latestResourceSnapshotIndex), + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceHash, + }, + }, + Spec: *resourceSnapshotSpec, } - // update the status based on the latestPolicySnapshot status - // update the status based on the work - return ctrl.Result{}, nil + resourceSnapshotKObj := klog.KObj(latestResourceSnapshot) + if err := controllerutil.SetControllerReference(crp, latestResourceSnapshot, r.Scheme); err != nil { + klog.ErrorS(err, "Failed to set owner reference", "clusterResourceSnapshot", resourceSnapshotKObj) + // should never happen + return nil, 
controller.NewUnexpectedBehaviorError(err) + } + + if err := r.Client.Create(ctx, latestResourceSnapshot); err != nil { + klog.ErrorS(err, "Failed to create new clusterResourceSnapshot", "clusterResourceSnapshot", resourceSnapshotKObj) + return nil, controller.NewAPIServerError(false, err) + } + return latestResourceSnapshot, nil } // ensureLatestPolicySnapshot ensures the latest policySnapshot has the isLatest label and the numberOfClusters are updated. @@ -141,6 +234,23 @@ func (r *Reconciler) ensureLatestPolicySnapshot(ctx context.Context, crp *fleetv return nil } +// ensureLatestResourceSnapshot ensures the latest resourceSnapshot has the isLatest label. +func (r *Reconciler) ensureLatestResourceSnapshot(ctx context.Context, latest *fleetv1beta1.ClusterResourceSnapshot) error { + if latest.Labels[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { + return nil + } + // It could happen when the controller just sets the latest label to false for the old snapshot, and fails to + // create a new resource snapshot. + // And then the customers revert back their resource to the old one again. + // In this case, the "latest" snapshot without isLatest label has the same resource hash as the current one. + latest.Labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(true) + if err := r.Client.Update(ctx, latest); err != nil { + klog.ErrorS(err, "Failed to update the clusterResourceSnapshot", "ClusterResourceSnapshot", klog.KObj(latest)) + return controller.NewAPIServerError(false, err) + } + return nil +} + // lookupLatestClusterPolicySnapshot finds the latest snapshots and its policy index. // There will be only one active policy snapshot if exists. // It first checks whether there is an active policy snapshot. @@ -170,7 +280,7 @@ func (r *Reconciler) lookupLatestClusterPolicySnapshot(ctx context.Context, crp } else if len(snapshotList.Items) > 1 { // It means there are multiple active snapshots and should never happen. 
err := fmt.Errorf("there are %d active clusterPolicySnapshots owned by clusterResourcePlacement %v", len(snapshotList.Items), crp.Name) - klog.ErrorS(err, "It should never happen", "clusterResourcePlacement", crpKObj) + klog.ErrorS(err, "Invalid clusterPolicySnapshots", "clusterResourcePlacement", crpKObj) return nil, -1, controller.NewUnexpectedBehaviorError(err) } // When there are no active snapshots, find the one who has the largest policy index. @@ -198,15 +308,79 @@ func (r *Reconciler) lookupLatestClusterPolicySnapshot(ctx context.Context, crp return &snapshotList.Items[index], lastPolicyIndex, nil } +// lookupLatestResourceSnapshot finds the latest snapshots and its resource index. +// There will be only one active resource snapshot if exists. +// It first checks whether there is an active resource snapshot. +// If not, it finds the one whose resourceIndex label is the largest. +// The resource index will always start from 0. +// Return error when 1) cannot list the snapshots 2) there are more than one active resource snapshots 3) snapshot has the +// invalid label value. +// 2 & 3 should never happen. 
+func (r *Reconciler) lookupLatestResourceSnapshot(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement) (*fleetv1beta1.ClusterResourceSnapshot, int, error) { + snapshotList := &fleetv1beta1.ClusterResourceSnapshotList{} + latestSnapshotLabelMatcher := client.MatchingLabels{ + fleetv1beta1.CRPTrackingLabel: crp.Name, + fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), + } + crpKObj := klog.KObj(crp) + if err := r.Client.List(ctx, snapshotList, latestSnapshotLabelMatcher); err != nil { + klog.ErrorS(err, "Failed to list active clusterResourceSnapshots", "clusterResourcePlacement", crpKObj) + return nil, -1, controller.NewAPIServerError(false, err) + } + if len(snapshotList.Items) == 1 { + resourceIndex, err := parseResourceIndexFromLabel(&snapshotList.Items[0]) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "clusterResourceSnapshot", klog.KObj(&snapshotList.Items[0])) + return nil, -1, controller.NewUnexpectedBehaviorError(err) + } + return &snapshotList.Items[0], resourceIndex, nil + } else if len(snapshotList.Items) > 1 { + // It means there are multiple active snapshots and should never happen. + err := fmt.Errorf("there are %d active clusterResourceSnapshots owned by clusterResourcePlacement %v", len(snapshotList.Items), crp.Name) + klog.ErrorS(err, "Invalid clusterResourceSnapshots", "clusterResourcePlacement", crpKObj) + return nil, -1, controller.NewUnexpectedBehaviorError(err) + } + // When there are no active snapshots, find the one who has the largest resource index. + if err := r.Client.List(ctx, snapshotList, client.MatchingLabels{fleetv1beta1.CRPTrackingLabel: crp.Name}); err != nil { + klog.ErrorS(err, "Failed to list all clusterResourceSnapshots", "clusterResourcePlacement", crpKObj) + return nil, -1, controller.NewAPIServerError(false, err) + } + if len(snapshotList.Items) == 0 { + // The resource index of the first snapshot will start from 0. 
+ return nil, -1, nil + } + index := -1 // the index of the cluster resource snapshot array + lastResourceIndex := -1 // the assigned resource index of the cluster resource snapshot + for i := range snapshotList.Items { + resourceIndex, err := parseResourceIndexFromLabel(&snapshotList.Items[i]) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "clusterResourceSnapshot", klog.KObj(&snapshotList.Items[i])) + return nil, -1, controller.NewUnexpectedBehaviorError(err) + } + if lastResourceIndex < resourceIndex { + index = i + lastResourceIndex = resourceIndex + } + } + return &snapshotList.Items[index], lastResourceIndex, nil +} + // parsePolicyIndexFromLabel returns error when parsing the label which should never return error in production. func parsePolicyIndexFromLabel(s *fleetv1beta1.ClusterSchedulingPolicySnapshot) (int, error) { indexLabel := s.Labels[fleetv1beta1.PolicyIndexLabel] v, err := strconv.Atoi(indexLabel) - if err != nil { - return -1, err + if err != nil || v < 0 { + return -1, fmt.Errorf("invalid policy index %q, error: %w", indexLabel, err) } - if v < 0 { - return -1, fmt.Errorf("policy index should not be negative: %d", v) + return v, nil +} + +// parseResourceIndexFromLabel returns error when parsing the label which should never return error in production. 
+func parseResourceIndexFromLabel(s *fleetv1beta1.ClusterResourceSnapshot) (int, error) { + indexLabel := s.Labels[fleetv1beta1.ResourceIndexLabel] + v, err := strconv.Atoi(indexLabel) + if err != nil || v < 0 { + return -1, fmt.Errorf("invalid resource index %q, error: %w", indexLabel, err) } return v, nil } @@ -215,11 +389,8 @@ func parsePolicyIndexFromLabel(s *fleetv1beta1.ClusterSchedulingPolicySnapshot) func parseNumberOfClustersFromAnnotation(s *fleetv1beta1.ClusterSchedulingPolicySnapshot) (int, error) { n := s.Annotations[fleetv1beta1.NumberOfClustersAnnotation] v, err := strconv.Atoi(n) - if err != nil { - return -1, err - } - if v < 0 { - return -1, fmt.Errorf("numberOfCluster should not be negative: %d", v) + if err != nil || v < 0 { + return -1, fmt.Errorf("invalid numberOfCluster %q, error: %w", n, err) } return v, nil } @@ -231,3 +402,20 @@ func generatePolicyHash(policy *fleetv1beta1.PlacementPolicy) (string, error) { } return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)), nil } + +func generateResourceHash(rs *fleetv1beta1.ResourceSnapshotSpec) (string, error) { + jsonBytes, err := json.Marshal(rs) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", sha256.Sum256(jsonBytes)), nil +} + +// parseResourceGroupHashFromAnnotation returns error when parsing the annotation which should never return error in production. 
+func parseResourceGroupHashFromAnnotation(s *fleetv1beta1.ClusterResourceSnapshot) (string, error) { + v, ok := s.Annotations[fleetv1beta1.ResourceGroupHashAnnotation] + if !ok { + return "", fmt.Errorf("ResourceGroupHashAnnotation is not set") + } + return v, nil +} diff --git a/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller_test.go b/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller_test.go index 10420422d..3a952ae1c 100644 --- a/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller_test.go +++ b/pkg/controllers/clusterresourceplacement/clusterresourceplacement_controller_test.go @@ -11,10 +11,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -67,11 +67,21 @@ func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { }, Spec: fleetv1beta1.ClusterResourcePlacementSpec{ Policy: placementPolicyForTest(), + ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Service", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"region": "east"}, + }, + }, + }, }, } } -func TestHandleUpdate(t *testing.T) { +func TestGetOrCreateClusterPolicySnapshot(t *testing.T) { wantPolicy := placementPolicyForTest() wantPolicy.NumberOfClusters = nil jsonBytes, err := json.Marshal(wantPolicy) @@ -85,9 +95,10 @@ func TestHandleUpdate(t *testing.T) { } unspecifiedPolicyHash := []byte(fmt.Sprintf("%x", sha256.Sum256(jsonBytes))) tests := []struct { - name string - policySnapshots []fleetv1beta1.ClusterSchedulingPolicySnapshot - wantPolicySnapshots []fleetv1beta1.ClusterSchedulingPolicySnapshot + name string + 
policySnapshots []fleetv1beta1.ClusterSchedulingPolicySnapshot + wantPolicySnapshots []fleetv1beta1.ClusterSchedulingPolicySnapshot + wantLatestSnapshotIndex int // index of the wantPolicySnapshots array }{ { name: "new clusterResourcePolicy and no existing policy snapshots owned by my-crp", @@ -142,6 +153,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 1, }, { name: "crp policy has no change", @@ -201,6 +213,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 0, }, { name: "crp policy has changed and there is no active snapshot", @@ -328,6 +341,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 2, }, { name: "crp policy has changed and there is an active snapshot", @@ -407,6 +421,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 1, }, { name: "crp policy has been changed and reverted back and there is no active snapshot", @@ -511,6 +526,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 1, }, { name: "crp policy has not been changed and only the numberOfCluster is changed", @@ -616,6 +632,7 @@ func TestHandleUpdate(t *testing.T) { }, }, }, + wantLatestSnapshotIndex: 1, }, } for _, tc := range tests { @@ -632,21 +649,20 @@ func TestHandleUpdate(t *testing.T) { WithObjects(objects...). 
Build() r := Reconciler{Client: fakeClient, Scheme: scheme} - got, err := r.handleUpdate(ctx, crp) + got, err := r.getOrCreateClusterPolicySnapshot(ctx, crp) if err != nil { - t.Fatalf("failed to handle update: %v", err) + t.Fatalf("failed to getOrCreateClusterPolicySnapshot: %v", err) + } + options := []cmp.Option{ + cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion"), } - want := ctrl.Result{} - if !cmp.Equal(got, want) { - t.Errorf("handleUpdate() = %+v, want %+v", got, want) + if diff := cmp.Diff(tc.wantPolicySnapshots[tc.wantLatestSnapshotIndex], *got, options...); diff != "" { + t.Errorf("getOrCreateClusterPolicySnapshot() mismatch (-want, +got):\n%s", diff) } clusterPolicySnapshotList := &fleetv1beta1.ClusterSchedulingPolicySnapshotList{} if err := fakeClient.List(ctx, clusterPolicySnapshotList); err != nil { t.Fatalf("clusterPolicySnapshot List() got error %v, want no error", err) } - options := []cmp.Option{ - cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion"), - } if diff := cmp.Diff(tc.wantPolicySnapshots, clusterPolicySnapshotList.Items, options...); diff != "" { t.Errorf("clusterPolicysnapShot List() mismatch (-want, +got):\n%s", diff) } @@ -654,7 +670,7 @@ func TestHandleUpdate(t *testing.T) { } } -func TestHandleUpdate_failure(t *testing.T) { +func TestGetOrCreateClusterPolicySnapshot_failure(t *testing.T) { wantPolicy := placementPolicyForTest() wantPolicy.NumberOfClusters = nil jsonBytes, err := json.Marshal(wantPolicy) @@ -873,16 +889,635 @@ func TestHandleUpdate_failure(t *testing.T) { WithObjects(objects...). 
Build() r := Reconciler{Client: fakeClient, Scheme: scheme} - got, err := r.handleUpdate(ctx, crp) + _, err := r.getOrCreateClusterPolicySnapshot(ctx, crp) if err == nil { // if error is nil - t.Fatal("handleUpdate = nil, want err") + t.Fatal("getOrCreateClusterResourceSnapshot() = nil, want err") } if !errors.Is(err, controller.ErrUnexpectedBehavior) { - t.Errorf("handleUpdate() got %v, want %v type", err, controller.ErrUnexpectedBehavior) + t.Errorf("getOrCreateClusterResourceSnapshot() got %v, want %v type", err, controller.ErrUnexpectedBehavior) } - want := ctrl.Result{} - if !cmp.Equal(got, want) { - t.Errorf("handleUpdate() = %+v, want %+v", got, want) + }) + } +} + +func serviceResourceContentForTest(t *testing.T) *fleetv1beta1.ResourceContent { + svc := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-name", + Namespace: "svc-namespace", + Annotations: map[string]string{ + "svc-annotation-key": "svc-object-annotation-key-value", + }, + Labels: map[string]string{ + "region": "east", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"svc-spec-selector-key": "svc-spec-selector-value"}, + Ports: []corev1.ServicePort{ + { + Name: "svc-port", + Protocol: corev1.ProtocolTCP, + AppProtocol: pointer.String("svc.com/my-custom-protocol"), + Port: 9001, + }, + }, + }, + } + return createResourceContentForTest(t, svc) +} + +func TestGetOrCreateClusterResourceSnapshot(t *testing.T) { + selectedResources := []fleetv1beta1.ResourceContent{ + *serviceResourceContentForTest(t), + } + policyA := "policy-a" + resourceSnapshotSpecA := &fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + PolicySnapshotName: policyA, + } + jsonBytes, err := json.Marshal(resourceSnapshotSpecA) + if err != nil { + t.Fatalf("failed to create the resourceSnapshotSpecA hash: %v", err) + } + resourceSnapshotAHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + policyB := 
"policy-b" + resourceSnapshotSpecB := &fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + PolicySnapshotName: policyB, + } + + jsonBytes, err = json.Marshal(resourceSnapshotSpecB) + if err != nil { + t.Fatalf("failed to create the policy hash: %v", err) + } + resourceSnapshotBHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + tests := []struct { + name string + resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec + resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + wantResourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + wantLatestSnapshotIndex int // index of the wantPolicySnapshots array + }{ + { + name: "new resourceSnapshot and no existing snapshots owned by my-crp", + resourceSnapshotSpec: resourceSnapshotSpecA, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-crp-1", + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: "another-crp", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-crp-1", + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: "another-crp", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + // new resource snapshot owned by the my-crp + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + 
Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "resource has no change", + resourceSnapshotSpec: resourceSnapshotSpecA, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "resource has changed and there is no active snapshot", + // It happens when last reconcile loop fails after setting the latest label to false and + // 
before creating a new resource snapshot. + resourceSnapshotSpec: resourceSnapshotSpecB, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + // new resource snapshot + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + 
fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotBHash, + }, + }, + Spec: *resourceSnapshotSpecB, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "resource has changed and there is an active snapshot", + resourceSnapshotSpec: resourceSnapshotSpecB, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "false", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + // new resource snapshot + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: 
[]metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotBHash, + }, + }, + Spec: *resourceSnapshotSpecB, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "resource has been changed and reverted back and there is no active snapshot", + resourceSnapshotSpec: resourceSnapshotSpecA, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotAHash, + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + wantLatestSnapshotIndex: 0, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { 
+ ctx := context.Background() + crp := clusterResourcePlacementForTest() + objects := []client.Object{crp} + for i := range tc.resourceSnapshots { + objects = append(objects, &tc.resourceSnapshots[i]) + } + scheme := serviceScheme(t) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + Build() + r := Reconciler{ + Client: fakeClient, + Scheme: scheme, + } + got, err := r.getOrCreateClusterResourceSnapshot(ctx, crp, tc.resourceSnapshotSpec) + if err != nil { + t.Fatalf("failed to handle getOrCreateClusterResourceSnapshot: %v", err) + } + options := []cmp.Option{ + cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion"), + // Fake API server will add a newline for the runtime.RawExtension type. + // ignoring the resourceContent field for now + cmpopts.IgnoreFields(runtime.RawExtension{}, "Raw"), + } + if diff := cmp.Diff(tc.wantResourceSnapshots[tc.wantLatestSnapshotIndex], *got, options...); diff != "" { + t.Errorf("getOrCreateClusterResourceSnapshot() mismatch (-want, +got):\n%s", diff) + } + clusterResourceSnapshotList := &fleetv1beta1.ClusterResourceSnapshotList{} + if err := fakeClient.List(ctx, clusterResourceSnapshotList); err != nil { + t.Fatalf("clusterResourceSnapshot List() got error %v, want no error", err) + } + if diff := cmp.Diff(tc.wantResourceSnapshots, clusterResourceSnapshotList.Items, options...); diff != "" { + t.Errorf("clusterResourceSnapshot List() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestGetOrCreateClusterResourceSnapshot_failure(t *testing.T) { + selectedResources := []fleetv1beta1.ResourceContent{ + *serviceResourceContentForTest(t), + } + policyA := "policy-a" + resourceSnapshotSpecA := &fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + PolicySnapshotName: policyA, + } + tests := []struct { + name string + resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + }{ + { + // Should never hit this case unless there is a bug in the controller or 
customers manually modify the clusterResourceSnapshot. + name: "existing active resource snapshot does not have resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "existing active resource snapshot does not have resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "existing active resource snapshot does not have hash annotation", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.ResourceIndexLabel: "0", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
+ name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.ResourceIndexLabel: "abc", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "multiple active resource snapshots exist", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + }, 
+ { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label (negative value)", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "-12", + fleetv1beta1.CRPTrackingLabel: testName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testName, + BlockOwnerDeletion: pointer.Bool(true), + Controller: pointer.Bool(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + crp := clusterResourcePlacementForTest() + objects := []client.Object{crp} + for i := range tc.resourceSnapshots { + objects = append(objects, &tc.resourceSnapshots[i]) + } + scheme := serviceScheme(t) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + r := Reconciler{ + Client: fakeClient, + Scheme: scheme, + } + _, err := r.getOrCreateClusterResourceSnapshot(ctx, crp, resourceSnapshotSpecA) + if err == nil { // if error is nil + t.Fatal("getOrCreateClusterResourceSnapshot() = nil, want err") + } + if !errors.Is(err, controller.ErrUnexpectedBehavior) { + t.Errorf("getOrCreateClusterResourceSnapshot() got %v, want %v type", err, controller.ErrUnexpectedBehavior) } }) } diff --git a/pkg/controllers/clusterresourceplacement/resource_selector_test.go b/pkg/controllers/clusterresourceplacement/resource_selector_test.go index 7f2d26b1b..afe64057b 100644 --- a/pkg/controllers/clusterresourceplacement/resource_selector_test.go +++ b/pkg/controllers/clusterresourceplacement/resource_selector_test.go @@ -509,26 +509,30 @@ func TestGenerateResourceContent(t *testing.T) { if err != nil { t.Fatalf("failed to generateResourceContent(): %v", err) } - want, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tt.wantResource) - if err != nil { - t.Fatalf("ToUnstructured failed: %v", err) - } - delete(want["metadata"].(map[string]interface{}), "creationTimestamp") - delete(want, "status") - - uWant := unstructured.Unstructured{Object: want} - rawWant, err := uWant.MarshalJSON() - if err != nil { - t.Fatalf("MarshalJSON failed: %v", err) - } - wantResourceContent := &fleetv1beta1.ResourceContent{ - RawExtension: runtime.RawExtension{ - Raw: rawWant, - }, - } + wantResourceContent := createResourceContentForTest(t, &tt.wantResource) if diff := cmp.Diff(wantResourceContent, got); diff != "" { t.Errorf("generateResourceContent() mismatch (-want, +got):\n%s", diff) } }) } } + +func createResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.ResourceContent { + want, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) + if err != nil { + t.Fatalf("ToUnstructured failed: %v", err) + } + delete(want["metadata"].(map[string]interface{}), "creationTimestamp") + delete(want, "status") + + uWant 
:= unstructured.Unstructured{Object: want} + rawWant, err := uWant.MarshalJSON() + if err != nil { + t.Fatalf("MarshalJSON failed: %v", err) + } + return &fleetv1beta1.ResourceContent{ + RawExtension: runtime.RawExtension{ + Raw: rawWant, + }, + } +}