diff --git a/config/crd/bases/placement.azure.com_clusterresourcesnapshots.yaml b/config/crd/bases/placement.azure.com_clusterresourcesnapshots.yaml
index d7cb25803..362c606c1 100644
--- a/config/crd/bases/placement.azure.com_clusterresourcesnapshots.yaml
+++ b/config/crd/bases/placement.azure.com_clusterresourcesnapshots.yaml
@@ -35,13 +35,14 @@ spec:
           index for each such group of resourceSnapshots. The name convention of
           a clusterResourceSnapshot is {CRPName}-{resourceIndex}(-{subindex})* where
           the name of the first snapshot of a group has no subindex part so its name
-          is {CRPName}-{resourceIndex}. Each snapshot MUST have the following labels:
-          - `CRPTrackingLabel` which points to its owner CRP. - `ResourceIndexLabel`
-          which is the index of the snapshot group. - `IsLatestSnapshotLabel` which
-          indicates whether the snapshot is the latest one. \n All the snapshots within
-          the same index group must have the same ResourceIndexLabel. \n The first
-          snapshot of the index group MUST have the following annotations: - \"NumberOfResourceSnapshots\"
-          to store the total number of resource snapshots in the index group. - `ResourceGroupHashAnnotation`
+          is {CRPName}-{resourceIndex}. The resourceIndex begins at 0. Each snapshot
+          MUST have the following labels: - `CRPTrackingLabel` which points to its
+          owner CRP. - `ResourceIndexLabel` which is the index of the snapshot group.
+          - `IsLatestSnapshotLabel` which indicates whether the snapshot is the latest
+          one. \n All the snapshots within the same index group must have the same
+          ResourceIndexLabel. \n The first snapshot of the index group MUST have the
+          following annotations: - \"NumberOfResourceSnapshots\" to store the total
+          number of resource snapshots in the index group. - `ResourceGroupHashAnnotation`
           whose value is the sha-256 hash of all the snapshots belong to the same
           snapshot index."
         properties:
diff --git a/pkg/controllers/clustershedulingpolicysnapshot/clusterschedulingpolicysnapshot_controller.go b/pkg/controllers/clustershedulingpolicysnapshot/controller.go
similarity index 100%
rename from pkg/controllers/clustershedulingpolicysnapshot/clusterschedulingpolicysnapshot_controller.go
rename to pkg/controllers/clustershedulingpolicysnapshot/controller.go
diff --git a/pkg/controllers/clustershedulingpolicysnapshot/controller_integration_test.go b/pkg/controllers/clustershedulingpolicysnapshot/controller_integration_test.go
new file mode 100644
index 000000000..2120215c9
--- /dev/null
+++ b/pkg/controllers/clustershedulingpolicysnapshot/controller_integration_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package clustershedulingpolicysnapshot
+
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+)
+
+const (
+	testCRPName      = "my-crp"
+	testSnapshotName = "my-snapshot"
+)
+
+func policySnapshot() *fleetv1beta1.ClusterSchedulingPolicySnapshot {
+	return &fleetv1beta1.ClusterSchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: testSnapshotName,
+			Labels: map[string]string{
+				fleetv1beta1.PolicyIndexLabel:      "1",
+				fleetv1beta1.IsLatestSnapshotLabel: "true",
+				fleetv1beta1.CRPTrackingLabel:      testCRPName,
+			},
+		},
+		Spec: fleetv1beta1.SchedulingPolicySnapshotSpec{
+			PolicyHash: []byte("hash"),
+		},
+	}
+}
+
+var _ = Describe("Test clusterSchedulingPolicySnapshot Controller", func() {
+	const (
+		timeout  = time.Second * 10
+		duration = time.Second * 10
+		interval = time.Millisecond * 250
+	)
+
+	var (
+		createdSnapshot = &fleetv1beta1.ClusterSchedulingPolicySnapshot{}
+	)
+
+	BeforeEach(func() {
+		fakePlacementController.ResetQueue()
+		By("By creating a new clusterSchedulingPolicySnapshot")
+		snapshot := policySnapshot()
+		Expect(k8sClient.Create(ctx, snapshot)).Should(Succeed())
+	})
+
+	Context("When creating new clusterSchedulingPolicySnapshot", func() {
+		AfterEach(func() {
+			By("By deleting snapshot")
+			createdSnapshot := policySnapshot()
+			Expect(k8sClient.Delete(ctx, createdSnapshot)).Should(Succeed())
+
+			By("By checking snapshot")
+			Eventually(func() bool {
+				return errors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: testSnapshotName}, createdSnapshot))
+			}, duration, interval).Should(BeTrue(), "snapshot should be deleted")
+		})
+
+		It("Should ignore the event", func() {
+			By("By checking placement controller queue")
+			Consistently(func() bool {
+				return fakePlacementController.Key() == ""
+			}, duration, interval).Should(BeTrue(), "controller should ignore the create event and not enqueue the request into the placementController queue")
+
+		})
+	})
+
+	Context("When updating clusterSchedulingPolicySnapshot", func() {
+		AfterEach(func() {
+			By("By deleting snapshot")
+			createdSnapshot := policySnapshot()
+			Expect(k8sClient.Delete(ctx, createdSnapshot)).Should(Succeed())
+
+			By("By checking snapshot")
+			Eventually(func() bool {
+				return errors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: testSnapshotName}, createdSnapshot))
+			}, duration, interval).Should(BeTrue(), "snapshot should be deleted")
+		})
+
+		It("Should enqueue the event", func() {
+			Expect(k8sClient.Get(ctx, types.NamespacedName{Name: testSnapshotName}, createdSnapshot)).Should(Succeed())
+
+			By("By updating the clusterSchedulingPolicySnapshot")
+			createdSnapshot.Spec.PolicyHash = []byte("modified-hash")
+			Expect(k8sClient.Update(ctx, createdSnapshot)).Should(Succeed())
+
+			By("By checking placement controller queue")
+			Eventually(func() bool {
+				return fakePlacementController.Key() == testCRPName
+			}, timeout, interval).Should(BeTrue(), "placementController should receive the CRP name")
+		})
+	})
+
+	Context("When deleting clusterSchedulingPolicySnapshot", func() {
+		It("Should ignore the event", func() {
+			By("By deleting snapshot")
+			createdSnapshot := policySnapshot()
+			Expect(k8sClient.Delete(ctx, createdSnapshot)).Should(Succeed())
+
+			By("By checking snapshot")
+			Eventually(func() bool {
+				return errors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: testSnapshotName}, createdSnapshot))
+			}, duration, interval).Should(BeTrue(), "snapshot should be deleted")
+
+			By("By checking placement controller queue")
+			Consistently(func() bool {
+				return fakePlacementController.Key() == ""
+			}, duration, interval).Should(BeTrue(), "controller should ignore the delete event and not enqueue the request into the placementController queue")
+		})
+	})
+})
diff --git a/pkg/controllers/clustershedulingpolicysnapshot/suite_test.go b/pkg/controllers/clustershedulingpolicysnapshot/suite_test.go
new file mode 100644
index 000000000..b21ed18c6
--- /dev/null
+++ b/pkg/controllers/clustershedulingpolicysnapshot/suite_test.go
@@ -0,0 +1,104 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package clustershedulingpolicysnapshot
+
+import (
+	"context"
+	"flag"
+	"path/filepath"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/klogr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/test/utils/controller"
+)
+
+var (
+	cfg                     *rest.Config
+	mgr                     manager.Manager
+	k8sClient               client.Client
+	testEnv                 *envtest.Environment
+	ctx                     context.Context
+	cancel                  context.CancelFunc
+	fakePlacementController *controller.FakeController
+)
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "ClusterSchedulingPolicySnapshot Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	klog.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	ctx, cancel = context.WithCancel(context.TODO())
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("../../../", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+	}
+
+	var err error
+	cfg, err = testEnv.Start()
+	Expect(err).Should(Succeed())
+	Expect(cfg).NotTo(BeNil())
+
+	err = fleetv1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	//+kubebuilder:scaffold:scheme
+	By("construct the k8s client")
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).Should(Succeed())
+	Expect(k8sClient).NotTo(BeNil())
+
+	By("starting the controller manager")
+	klog.InitFlags(flag.CommandLine)
+	flag.Parse()
+
+	mgr, err = ctrl.NewManager(cfg, ctrl.Options{
+		Scheme:             scheme.Scheme,
+		MetricsBindAddress: "0",
+		Logger:             klogr.NewWithOptions(klogr.WithFormat(klogr.FormatKlog)),
+	})
+	Expect(err).Should(Succeed())
+
+	fakePlacementController = &controller.FakeController{}
+
+	err = (&Reconciler{
+		Client:              mgr.GetClient(),
+		PlacementController: fakePlacementController,
+	}).SetupWithManager(mgr)
+	Expect(err).Should(Succeed())
+
+	go func() {
+		defer GinkgoRecover()
+		err = mgr.Start(ctx)
+		Expect(err).Should(Succeed(), "failed to run manager")
+	}()
+})
+
+var _ = AfterSuite(func() {
+	defer klog.Flush()
+
+	cancel()
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).Should(Succeed())
+})
diff --git a/test/utils/controller/controller.go b/test/utils/controller/controller.go
new file mode 100644
index 000000000..4540280e4
--- /dev/null
+++ b/test/utils/controller/controller.go
@@ -0,0 +1,48 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+// Package controller provides a fake controller for testing.
+package controller
+
+import (
+	"context"
+	"sync"
+)
+
+// FakeController is a fake controller which only stores one key.
+type FakeController struct {
+	key string
+	mu  sync.RWMutex
+}
+
+// ResetQueue resets the value in the queue.
+func (f *FakeController) ResetQueue() {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	f.key = ""
+}
+
+// Enqueue enqueues a string type key.
+func (f *FakeController) Enqueue(obj interface{}) {
+	key, ok := obj.(string)
+	if !ok {
+		return
+	}
+	f.mu.Lock()
+	f.key = key
+	f.mu.Unlock()
+}
+
+// Run does nothing.
+func (f *FakeController) Run(_ context.Context, _ int) error {
+	return nil
+}
+
+// Key returns the key stored in the queue.
+func (f *FakeController) Key() string {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	return f.key
+}
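
Note on how the pieces connect: suite_test.go hands this FakeController to the Reconciler's PlacementController field, so the fake has to satisfy whatever interface that field declares. A minimal sketch of the assumed contract follows; the interface name Controller and the exact signatures are illustrative guesses, not the repo's actual definition.

package controller

import "context"

// Controller is a hypothetical name for the interface the Reconciler's
// PlacementController field expects. Only the two methods FakeController
// implements are listed; the real definition in the fleet repo may differ.
type Controller interface {
	// Enqueue hands a key (here, a CRP name) to the controller's work queue.
	Enqueue(obj interface{})
	// Run starts the controller's workers until the context is cancelled.
	Run(ctx context.Context, workerNumber int) error
}

// Compile-time assertion that the fake satisfies the assumed contract.
var _ Controller = &FakeController{}

The tests then drive the fake directly: ResetQueue() in BeforeEach clears state between specs, and Key() is polled with Eventually/Consistently to assert whether the reconciler enqueued anything.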
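
Finally, a sketch of the naming and label convention that the updated ClusterResourceSnapshot CRD description (top of this diff) spells out. Everything here is illustrative: the ClusterResourceSnapshot Go type and the ResourceIndexLabel, NumberOfResourceSnapshotsAnnotation, and ResourceGroupHashAnnotation identifiers are assumed names, not necessarily what apis/placement/v1beta1 actually exports.

package example

import (
	"fmt"
	"strconv"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

// firstSnapshotOfGroup builds the first snapshot of an index group as the CRD
// description prescribes: the name is {CRPName}-{resourceIndex} with no
// subindex part, resourceIndex begins at 0, all three required labels are set,
// and only this first snapshot carries the two group-level annotations.
// Later members of the same group would be named {CRPName}-{resourceIndex}-{subindex}.
func firstSnapshotOfGroup(crpName string, resourceIndex, totalSnapshots int, groupHash string) *fleetv1beta1.ClusterResourceSnapshot {
	return &fleetv1beta1.ClusterResourceSnapshot{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%d", crpName, resourceIndex), // e.g. "my-crp-0"
			Labels: map[string]string{
				fleetv1beta1.CRPTrackingLabel:      crpName,
				fleetv1beta1.ResourceIndexLabel:    strconv.Itoa(resourceIndex),
				fleetv1beta1.IsLatestSnapshotLabel: "true",
			},
			Annotations: map[string]string{
				// Assumed constant names for the "NumberOfResourceSnapshots"
				// and ResourceGroupHashAnnotation keys named in the CRD text.
				fleetv1beta1.NumberOfResourceSnapshotsAnnotation: strconv.Itoa(totalSnapshots),
				fleetv1beta1.ResourceGroupHashAnnotation:         groupHash,
			},
		},
	}
}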