From 2b88073eb08e703694004323ea86dc5855f34704 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Fri, 10 Apr 2026 17:48:52 -0600 Subject: [PATCH 01/16] dnm: test olmv1 deployment Signed-off-by: Wesley Hayutin --- Makefile | 39 ++++ config/manager/kustomization.yaml | 4 +- tests/olmv1/.gitignore | 1 + tests/olmv1/olmv1_install_test.go | 264 ++++++++++++++++++++++ tests/olmv1/olmv1_suite_test.go | 357 ++++++++++++++++++++++++++++++ 5 files changed, 663 insertions(+), 2 deletions(-) create mode 100644 tests/olmv1/.gitignore create mode 100644 tests/olmv1/olmv1_install_test.go create mode 100644 tests/olmv1/olmv1_suite_test.go diff --git a/Makefile b/Makefile index 9d10c0d358b..a1efa59b945 100644 --- a/Makefile +++ b/Makefile @@ -1017,6 +1017,45 @@ test-e2e-cleanup: login-required $(OC_CLI) delete ns mysql-persistent --ignore-not-found=true rm -rf $(SETTINGS_TMP) +##@ OLMv1 Tests + +OLMV1_PACKAGE ?= oadp-operator +OLMV1_NAMESPACE ?= $(OADP_TEST_NAMESPACE) +OLMV1_CHANNEL ?= +OLMV1_VERSION ?= +OLMV1_UPGRADE_VERSION ?= +OLMV1_CATALOG ?= oadp-olmv1-test-catalog +OLMV1_CATALOG_IMAGE ?= +OLMV1_SERVICE_ACCOUNT ?= oadp-olmv1-installer +OLMV1_FAIL_FAST ?= true + +OLMV1_GINKGO_FLAGS = --vv \ + --no-color=$(OPENSHIFT_CI) \ + --label-filter="olmv1" \ + --junit-report="$(ARTIFACT_DIR)/junit_olmv1_report.xml" \ + --fail-fast=$(OLMV1_FAIL_FAST) \ + --timeout=30m + +.PHONY: test-olmv1 +test-olmv1: login-required install-ginkgo ## Run OLMv1 lifecycle tests (install, verify, upgrade, cleanup) against a cluster with OLMv1 enabled. + ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) $(GINKGO_ARGS) tests/olmv1/ -- \ + -namespace=$(OLMV1_NAMESPACE) \ + -package=$(OLMV1_PACKAGE) \ + -channel=$(OLMV1_CHANNEL) \ + -version=$(OLMV1_VERSION) \ + -upgrade-version=$(OLMV1_UPGRADE_VERSION) \ + -catalog=$(OLMV1_CATALOG) \ + -catalog-image=$(OLMV1_CATALOG_IMAGE) \ + -service-account=$(OLMV1_SERVICE_ACCOUNT) \ + -artifact_dir=$(ARTIFACT_DIR) + +.PHONY: test-olmv1-cleanup +test-olmv1-cleanup: login-required ## Cleanup resources created by OLMv1 tests. 
+ $(OC_CLI) delete clusterextension oadp-operator --ignore-not-found=true + $(OC_CLI) delete clustercatalog $(OLMV1_CATALOG) --ignore-not-found=true + $(OC_CLI) delete clusterrolebinding $(OLMV1_SERVICE_ACCOUNT)-cluster-admin --ignore-not-found=true + $(OC_CLI) delete sa $(OLMV1_SERVICE_ACCOUNT) -n $(OLMV1_NAMESPACE) --ignore-not-found=true + .PHONY: update-non-admin-manifests update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest update-non-admin-manifests: yq ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index d16337ed00d..4194e1b1228 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/konveyor/oadp-operator - newTag: latest + newName: ttl.sh/oadp-operator-7e53a850 + newTag: 1h diff --git a/tests/olmv1/.gitignore b/tests/olmv1/.gitignore new file mode 100644 index 00000000000..3fec32c8427 --- /dev/null +++ b/tests/olmv1/.gitignore @@ -0,0 +1 @@ +tmp/ diff --git a/tests/olmv1/olmv1_install_test.go b/tests/olmv1/olmv1_install_test.go new file mode 100644 index 00000000000..e5671f740e1 --- /dev/null +++ b/tests/olmv1/olmv1_install_test.go @@ -0,0 +1,264 @@ +package olmv1_test + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + clusterExtensionName = "oadp-operator" + + oadpCRDName = "dataprotectionapplications.oadp.openshift.io" + veleroCRDName = "backups.velero.io" + restoreCRDName = "restores.velero.io" + + managerLabelSelector = "control-plane=controller-manager" +) + +var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("olmv1"), func() { + ctx := context.Background() + + ginkgo.BeforeAll(func() { + ginkgo.By("Cleaning up orphaned OADP/Velero CRDs from previous installs") + cleanupOrphanedCRDs(ctx) + + ginkgo.By("Setting up namespace, ServiceAccount, and RBAC") + ensureNamespace(ctx, namespace) + ensureServiceAccount(ctx, serviceAccountName, namespace) + ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + + if catalogImage != "" { + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, catalogImage)) + ensureClusterCatalog(ctx, catalogName, catalogImage) + waitForClusterCatalogServing(ctx, catalogName) + } + }) + + ginkgo.AfterAll(func() { + ginkgo.By("Cleaning up OLMv1 test resources") + err := deleteClusterExtension(ctx, clusterExtensionName) + if err != nil { + log.Printf("Warning: failed to delete ClusterExtension: %v", err) + } + + gomega.Eventually(func() bool { + _, err := getClusterExtension(ctx, clusterExtensionName) + return apierrors.IsNotFound(err) + }, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted") + + if createdCatalog { + ginkgo.By(fmt.Sprintf("Deleting ClusterCatalog %s", catalogName)) + deleteClusterCatalog(ctx, catalogName) + } + + cleanupClusterRoleBinding(ctx, serviceAccountName) + }) + + ginkgo.It("should install OADP operator via ClusterExtension", func() { + ginkgo.By("Creating the ClusterExtension") + ce := buildClusterExtension(clusterExtensionName, packageName, namespace, serviceAccountName) + _, err := 
dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", clusterExtensionName, packageName, namespace) + + ginkgo.By("Waiting for ClusterExtension to be installed") + terminalReasons := map[string]bool{ + "InvalidConfiguration": true, + "Failed": true, + } + gomega.Eventually(func(g gomega.Gomega) { + obj, err := getClusterExtension(ctx, clusterExtensionName) + g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist") + + logAllConditions(obj) + + progCond, progFound := getCondition(obj, "Progressing") + if progFound { + reason, _ := progCond["reason"].(string) + message, _ := progCond["message"].(string) + g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), + "ClusterExtension has terminal error on Progressing: reason=%s message=%s", reason, message) + } + + instCond, instFound := getCondition(obj, "Installed") + g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") + status, _ := instCond["status"].(string) + g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") + }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) + + ginkgo.By("Checking installed bundle info") + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bundleName, bundleVersion, found := getInstalledBundle(obj) + gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status") + log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) + }) + + ginkgo.It("should have the OADP controller-manager pod running", func() { + ginkgo.By("Waiting for controller-manager pod to be Running") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + log.Printf("Controller-manager pod %s is Running", pod.Name) + return true, nil + } + log.Printf("Controller-manager pod %s phase: %s", pod.Name, pod.Status.Phase) + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running") + }) + + ginkgo.It("should have OADP CRDs installed", func() { + expectedCRDs := []string{ + oadpCRDName, + veleroCRDName, + restoreCRDName, + "schedules.velero.io", + "backupstoragelocations.velero.io", + "volumesnapshotlocations.velero.io", + } + + for _, crdName := range expectedCRDs { + ginkgo.By(fmt.Sprintf("Checking CRD %s exists", crdName)) + exists, err := crdExists(ctx, crdName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("CRD %s should exist", crdName)) + log.Printf("CRD %s exists", crdName) + } + }) + + ginkgo.It("should not report deprecation warnings", func() { + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} { + cond, found := getCondition(obj, condType) + if found { + status, _ := cond["status"].(string) + gomega.Expect(status).To(gomega.Equal("False"), + fmt.Sprintf("%s condition should be False, got %s", condType, status)) + } + } + }) + + ginkgo.When("upgrading the operator", func() { + 
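// Upgrade flow exercised below (descriptive of the spec that follows): the test
// patches spec.source.catalog.version to the requested target and sets
// upgradeConstraintPolicy to SelfCertified so OLMv1 accepts the jump even when it
// falls outside the catalog's declared upgrade edges, then polls until the
// installed bundle version reported in status differs from the pre-upgrade version
// while the Installed condition remains True.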
ginkgo.BeforeAll(func() { + if upgradeVersion == "" { + ginkgo.Skip("No --upgrade-version specified, skipping upgrade tests") + } + }) + + ginkgo.It("should upgrade the ClusterExtension to the target version", func() { + ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion)) + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + previousBundleName, previousVersion, _ := getInstalledBundle(obj) + log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) + + catalogSpec, _, _ := unstructuredNestedMap(obj.Object, "spec", "source", "catalog") + gomega.Expect(catalogSpec).NotTo(gomega.BeNil()) + catalogSpec["version"] = upgradeVersion + catalogSpec["upgradeConstraintPolicy"] = "SelfCertified" + err = unstructuredSetNestedMap(obj.Object, catalogSpec, "spec", "source", "catalog") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = dynamicClient.Resource(clusterExtensionGVR).Update(ctx, obj, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Patched ClusterExtension version to %s", upgradeVersion) + + ginkgo.By("Waiting for upgrade to complete") + gomega.Eventually(func() string { + updated, err := getClusterExtension(ctx, clusterExtensionName) + if err != nil { + return "" + } + + cond, found := getCondition(updated, "Installed") + if !found { + return "" + } + status, _ := cond["status"].(string) + if status != "True" { + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + log.Printf("Installed condition: status=%s reason=%s message=%s", status, reason, message) + return "" + } + + _, bundleVer, found := getInstalledBundle(updated) + if !found { + return "" + } + log.Printf("Installed bundle version: %s", bundleVer) + return bundleVer + }, 10*time.Minute, 10*time.Second).ShouldNot(gomega.Equal(previousVersion), + "Installed bundle version should change after upgrade") + + ginkgo.By("Verifying controller-manager pod is running after upgrade") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + return true, nil + } + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue()) + }) + }) +}) + +func unstructuredNestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + var current interface{} = obj + for _, field := range fields { + m, ok := current.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("expected map at field %s", field) + } + current, ok = m[field] + if !ok { + return nil, false, nil + } + } + result, ok := current.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("final value is not a map") + } + return result, true, nil +} + +func unstructuredSetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { + if len(fields) == 0 { + return fmt.Errorf("no fields specified") + } + current := obj + for _, field := range fields[:len(fields)-1] { + next, ok := current[field].(map[string]interface{}) + if !ok { + return fmt.Errorf("expected map at field %s", field) + } + current = next + } + current[fields[len(fields)-1]] = value + return nil +} diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go new file 
mode 100644 index 00000000000..7494879baaf --- /dev/null +++ b/tests/olmv1/olmv1_suite_test.go @@ -0,0 +1,357 @@ +package olmv1_test + +import ( + "context" + "flag" + "log" + "strings" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +var ( + namespace string + packageName string + channel string + version string + upgradeVersion string + catalogName string + catalogImage string + serviceAccountName string + artifactDir string + + createdCatalog bool + + kubeClient *kubernetes.Clientset + dynamicClient dynamic.Interface + + clusterExtensionGVR = schema.GroupVersionResource{ + Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clusterextensions", + } + + clusterCatalogGVR = schema.GroupVersionResource{ + Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clustercatalogs", + } +) + +func init() { + flag.StringVar(&namespace, "namespace", "openshift-adp", "Namespace to install the operator into") + flag.StringVar(&packageName, "package", "oadp-operator", "OLM package name for the operator") + flag.StringVar(&channel, "channel", "", "Catalog channel (optional)") + flag.StringVar(&version, "version", "", "Version to install (optional, e.g. '1.5.1' or '1.5.x')") + flag.StringVar(&upgradeVersion, "upgrade-version", "", "Version to upgrade to (optional)") + flag.StringVar(&catalogName, "catalog", "oadp-olmv1-test-catalog", "ClusterCatalog name to create or reference") + flag.StringVar(&catalogImage, "catalog-image", "", "Catalog image to use for creating a ClusterCatalog (required when package is not in default catalogs)") + flag.StringVar(&serviceAccountName, "service-account", "oadp-olmv1-installer", "ServiceAccount name for ClusterExtension") + flag.StringVar(&artifactDir, "artifact_dir", "/tmp", "Directory for test artifacts") +} + +func TestOADPOLMv1(t *testing.T) { + flag.Parse() + gomega.RegisterFailHandler(ginkgo.Fail) + + kubeConfig := config.GetConfigOrDie() + kubeConfig.QPS = 50 + kubeConfig.Burst = 100 + + var err error + kubeClient, err = kubernetes.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + dynamicClient, err = dynamic.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.RunSpecs(t, "OADP OLMv1 Suite") +} + +// --- Helpers --- + +func ensureNamespace(ctx context.Context, name string) { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} + _, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created namespace %s", name) +} + +func ensureServiceAccount(ctx context.Context, name, ns string) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + } + _, err := kubeClient.CoreV1().ServiceAccounts(ns).Create(ctx, sa, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ServiceAccount %s/%s", ns, name) +} + +// 
ensureClusterAdminBinding grants cluster-admin to the installer SA. +// This is intentionally broad for testing; production should use least-privilege RBAC. +func ensureClusterAdminBinding(ctx context.Context, saName, ns string) { + bindingName := saName + "-cluster-admin" + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: bindingName}, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + {Kind: "ServiceAccount", Name: saName, Namespace: ns}, + }, + } + _, err := kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterRoleBinding %s", bindingName) +} + +func buildClusterExtension(name, pkg, ns, sa string) *unstructured.Unstructured { + spec := map[string]interface{}{ + "namespace": ns, + "serviceAccount": map[string]interface{}{ + "name": sa, + }, + "source": map[string]interface{}{ + "sourceType": "Catalog", + "catalog": map[string]interface{}{ + "packageName": pkg, + }, + }, + // OwnNamespace operators require watchNamespace to tell OLMv1 + // which namespace the operator should watch. Set it to the + // install namespace so it mirrors OLMv0 OwnNamespace behavior. + "config": map[string]interface{}{ + "configType": "Inline", + "inline": map[string]interface{}{ + "watchNamespace": ns, + }, + }, + } + + ce := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterExtension", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": spec, + }, + } + + catalogSpec := spec["source"].(map[string]interface{})["catalog"].(map[string]interface{}) + if catalogImage != "" { + catalogSpec["selector"] = map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "olm.operatorframework.io/metadata.name": catalogName, + }, + } + } + if channel != "" { + catalogSpec["channels"] = []interface{}{channel} + } + if version != "" { + catalogSpec["version"] = version + } + + return ce +} + +func getClusterExtension(ctx context.Context, name string) (*unstructured.Unstructured, error) { + return dynamicClient.Resource(clusterExtensionGVR).Get(ctx, name, metav1.GetOptions{}) +} + +func deleteClusterExtension(ctx context.Context, name string) error { + err := dynamicClient.Resource(clusterExtensionGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + return err +} + +func getCondition(obj *unstructured.Unstructured, condType string) (map[string]interface{}, bool) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + return nil, false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == condType { + return cond, true + } + } + return nil, false +} + +func logAllConditions(obj *unstructured.Unstructured) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + log.Print(" No conditions present yet") + return + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + condType, _ := cond["type"].(string) + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + if len(message) > 120 { + message = 
message[:120] + "..." + } + log.Printf(" %s: status=%s reason=%s message=%s", condType, status, reason, message) + } +} + +func getInstalledBundle(obj *unstructured.Unstructured) (name string, ver string, found bool) { + bundleName, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "name") + bundleVersion, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "version") + if bundleName != "" { + return bundleName, bundleVersion, true + } + return "", "", false +} + +func crdExists(ctx context.Context, name string) (bool, error) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + _, err := dynamicClient.Resource(crdGVR).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func cleanupClusterRoleBinding(ctx context.Context, saName string) { + bindingName := saName + "-cluster-admin" + err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, bindingName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterRoleBinding %s: %v", bindingName, err) + } +} + +// cleanupOrphanedCRDs deletes any OADP or Velero CRDs left behind by a +// previous OLMv0 deployment or a prior test run. OLMv1 cannot adopt CRDs +// it did not create, so these must be removed before a fresh install. +func cleanupOrphanedCRDs(ctx context.Context) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + crdList, err := dynamicClient.Resource(crdGVR).List(ctx, metav1.ListOptions{}) + if err != nil { + log.Printf("Warning: failed to list CRDs: %v", err) + return + } + var deleted int + for _, crd := range crdList.Items { + name := crd.GetName() + if strings.HasSuffix(name, ".oadp.openshift.io") || strings.HasSuffix(name, ".velero.io") { + if err := dynamicClient.Resource(crdGVR).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete CRD %s: %v", name, err) + } else { + deleted++ + } + } + } + if deleted > 0 { + log.Printf("Deleted %d orphaned OADP/Velero CRDs", deleted) + } +} + +func ensureClusterCatalog(ctx context.Context, name, image string) { + cc := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterCatalog", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": map[string]interface{}{ + "source": map[string]interface{}{ + "type": "Image", + "image": map[string]interface{}{ + "ref": image, + }, + }, + }, + }, + } + _, err := dynamicClient.Resource(clusterCatalogGVR).Create(ctx, cc, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + log.Printf("ClusterCatalog %s already exists", name) + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + createdCatalog = true + log.Printf("Created ClusterCatalog %s with image %s", name, image) +} + +func waitForClusterCatalogServing(ctx context.Context, name string) { + gomega.Eventually(func() bool { + obj, err := dynamicClient.Resource(clusterCatalogGVR).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Printf("Error getting ClusterCatalog %s: %v", name, err) + return false + } + conditions, 
found, _ := unstructured.NestedSlice(obj.Object, "status", "conditions") + if !found { + return false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == "Serving" { + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + log.Printf("ClusterCatalog %s Serving: status=%s reason=%s", name, status, reason) + return status == "True" + } + } + return false + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "ClusterCatalog should be Serving") +} + +func deleteClusterCatalog(ctx context.Context, name string) { + err := dynamicClient.Resource(clusterCatalogGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterCatalog %s: %v", name, err) + } +} From 76f9c167ce98656bc97e3c409fd088bce3d14a5a Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Mon, 13 Apr 2026 13:41:41 -0600 Subject: [PATCH 02/16] fix unit tests Signed-off-by: Wesley Hayutin --- Makefile | 2 +- config/manager/kustomization.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index a1efa59b945..027571e7883 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,7 @@ vet: check-go ## Run go vet against code. .PHONY: test test: check-go vet envtest ## Run unit tests; run Go linters checks; check if api and bundle folders are up to date; and check if go dependencies are valid @make versions - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e) -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... 
| grep -v /tests/e2e | grep -v /tests/olmv1) -coverprofile cover.out @make lint @make api-isupdated @make bundle-isupdated diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 4194e1b1228..d16337ed00d 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: ttl.sh/oadp-operator-7e53a850 - newTag: 1h + newName: quay.io/konveyor/oadp-operator + newTag: latest From ac1680312fca7eece9cab09013817cc638e8fd2c Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Mon, 13 Apr 2026 14:39:30 -0600 Subject: [PATCH 03/16] update lint Signed-off-by: Wesley Hayutin --- .golangci.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.golangci.yaml b/.golangci.yaml index d8ad4cb082f..f9f13476bd2 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -77,6 +77,7 @@ linters: - third_party$ - builtin$ - examples$ + - tests/olmv1 issues: max-issues-per-linter: 0 max-same-issues: 0 From 565e1782ca890cf7108a08ab3c8582d9d51886ef Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Tue, 5 May 2026 17:39:35 -0400 Subject: [PATCH 04/16] Enhance OLMv1 migration support: update service account naming, add manifest generation, and implement migration tests Signed-off-by: Tiger Kaovilai --- Makefile | 104 +++++++++- .../oadp-operator.clusterserviceversion.yaml | 2 +- .../oadp-operator.clusterserviceversion.yaml | 2 +- tests/olmv1/olmv1_migrate_test.go | 192 ++++++++++++++++++ tests/olmv1/olmv1_suite_test.go | 8 +- 5 files changed, 300 insertions(+), 8 deletions(-) create mode 100644 tests/olmv1/olmv1_migrate_test.go diff --git a/Makefile b/Makefile index 027571e7883..0479ee7578c 100644 --- a/Makefile +++ b/Makefile @@ -1026,7 +1026,8 @@ OLMV1_VERSION ?= OLMV1_UPGRADE_VERSION ?= OLMV1_CATALOG ?= oadp-olmv1-test-catalog OLMV1_CATALOG_IMAGE ?= -OLMV1_SERVICE_ACCOUNT ?= oadp-olmv1-installer +OLMV1_SERVICE_ACCOUNT ?= $(OLMV1_PACKAGE)-installer +OLMV1_INSTALLER_BINDING ?= $(OLMV1_SERVICE_ACCOUNT)-binding OLMV1_FAIL_FAST ?= true OLMV1_GINKGO_FLAGS = --vv \ @@ -1051,11 +1052,108 @@ test-olmv1: login-required install-ginkgo ## Run OLMv1 lifecycle tests (install, .PHONY: test-olmv1-cleanup test-olmv1-cleanup: login-required ## Cleanup resources created by OLMv1 tests. - $(OC_CLI) delete clusterextension oadp-operator --ignore-not-found=true + $(OC_CLI) delete clusterextension $(OLMV1_PACKAGE) --ignore-not-found=true $(OC_CLI) delete clustercatalog $(OLMV1_CATALOG) --ignore-not-found=true - $(OC_CLI) delete clusterrolebinding $(OLMV1_SERVICE_ACCOUNT)-cluster-admin --ignore-not-found=true + $(OC_CLI) delete clusterrolebinding $(OLMV1_INSTALLER_BINDING) --ignore-not-found=true $(OC_CLI) delete sa $(OLMV1_SERVICE_ACCOUNT) -n $(OLMV1_NAMESPACE) --ignore-not-found=true +OLMV1_MANIFEST ?= oadp-olmv1-manifest.yaml + +.PHONY: generate-olmv1-manifest +generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, ClusterExtension) per OCPSTRAT-2268 template. 
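+# Usage sketch (illustrative values; OLMV1_CHANNEL and OLMV1_VERSION are optional):
+#   make generate-olmv1-manifest OLMV1_NAMESPACE=openshift-adp OLMV1_VERSION=1.5.1
+#   oc apply -f oadp-olmv1-manifest.yaml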
+ @printf '%s\n' \ + '---' \ + 'apiVersion: v1' \ + 'kind: Namespace' \ + 'metadata:' \ + ' name: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: v1' \ + 'kind: ServiceAccount' \ + 'metadata:' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: rbac.authorization.k8s.io/v1' \ + 'kind: ClusterRoleBinding' \ + 'metadata:' \ + ' name: $(OLMV1_INSTALLER_BINDING)' \ + 'roleRef:' \ + ' apiGroup: rbac.authorization.k8s.io' \ + ' kind: ClusterRole' \ + ' name: cluster-admin' \ + 'subjects:' \ + '- kind: ServiceAccount' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterExtension' \ + 'metadata:' \ + ' name: $(OLMV1_PACKAGE)' \ + 'spec:' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + ' serviceAccount:' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' config:' \ + ' configType: Inline' \ + ' inline:' \ + ' watchNamespace: $(OLMV1_NAMESPACE)' \ + ' source:' \ + ' sourceType: Catalog' \ + ' catalog:' \ + ' packageName: $(OLMV1_PACKAGE)' \ + > $(OLMV1_MANIFEST) + @if [ -n "$(OLMV1_CHANNEL)" ]; then \ + printf ' channel: %s\n' '$(OLMV1_CHANNEL)' >> $(OLMV1_MANIFEST); \ + fi + @if [ -n "$(OLMV1_VERSION)" ]; then \ + printf ' version: "%s"\n' '$(OLMV1_VERSION)' >> $(OLMV1_MANIFEST); \ + fi + @echo "Generated $(OLMV1_MANIFEST)" + +.PHONY: upgrade-v0-to-olmv1 +upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to OLMv1 (ClusterExtension). Requires OCP 4.20+. + $(OC_CLI) whoami + @echo "=== Phase 1: Removing OLMv0 resources ===" + -$(OC_CLI) delete subscription oadp-operator -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) get subscription -n $(OADP_TEST_NAMESPACE) -o name 2>/dev/null | \ + xargs -I {} sh -c '$(OC_CLI) get {} -n $(OADP_TEST_NAMESPACE) -o jsonpath='"'"'{.metadata.name}{"\t"}{.spec.source}{"\n"}'"'"' 2>/dev/null' | \ + grep "$(CATALOG_SOURCE_NAME)" | cut -f1 | \ + xargs -I {} $(OC_CLI) delete subscription {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + -$(OC_CLI) delete csv -l operators.coreos.com/oadp-operator.$(OADP_TEST_NAMESPACE) -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) get csv -n $(OADP_TEST_NAMESPACE) -o name 2>/dev/null | grep oadp-operator | \ + xargs -I {} $(OC_CLI) delete {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + -$(OC_CLI) delete operatorgroup oadp-operator-group -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) delete catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) --ignore-not-found=true + @echo "=== Phase 2: Removing orphaned OADP/Velero CRDs ===" + # OLMv1 cannot adopt CRDs it did not create + -$(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io' | \ + xargs -r $(OC_CLI) delete --ignore-not-found=true || true + @echo "=== Phase 3: Applying OLMv1 manifest ===" + $(MAKE) generate-olmv1-manifest + $(OC_CLI) apply -f $(OLMV1_MANIFEST) + @echo "=== Phase 4: Waiting for ClusterExtension Installed=True ===" + $(OC_CLI) wait clusterextension/$(OLMV1_PACKAGE) \ + --for=condition=Installed=True --timeout=600s + @echo "Migration complete." + $(OC_CLI) get clusterextension $(OLMV1_PACKAGE) + +.PHONY: test-upgrade-v0-to-olmv1 +test-upgrade-v0-to-olmv1: login-required install-ginkgo ## Test OLMv0->OLMv1 migration path. Expects a pre-existing OLMv0 OADP install (run make deploy-olm first). 
+ ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) \ + --label-filter="olmv1-migrate" \ + $(GINKGO_ARGS) tests/olmv1/ -- \ + -namespace=$(OLMV1_NAMESPACE) \ + -package=$(OLMV1_PACKAGE) \ + -channel=$(OLMV1_CHANNEL) \ + -version=$(OLMV1_VERSION) \ + -catalog=$(OLMV1_CATALOG) \ + -catalog-image=$(OLMV1_CATALOG_IMAGE) \ + -service-account=$(OLMV1_SERVICE_ACCOUNT) \ + -migrate=true \ + -artifact_dir=$(ARTIFACT_DIR) + .PHONY: update-non-admin-manifests update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest update-non-admin-manifests: yq ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH diff --git a/bundle/manifests/oadp-operator.clusterserviceversion.yaml b/bundle/manifests/oadp-operator.clusterserviceversion.yaml index 44481ede449..81cc9d6c757 100644 --- a/bundle/manifests/oadp-operator.clusterserviceversion.yaml +++ b/bundle/manifests/oadp-operator.clusterserviceversion.yaml @@ -1639,7 +1639,7 @@ spec: installModes: - supported: true type: OwnNamespace - - supported: false + - supported: true type: SingleNamespace - supported: false type: MultiNamespace diff --git a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml index 798be1cc83c..95939eaab1d 100644 --- a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml @@ -467,7 +467,7 @@ spec: installModes: - supported: true type: OwnNamespace - - supported: false + - supported: true type: SingleNamespace - supported: false type: MultiNamespace diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go new file mode 100644 index 00000000000..f5fc135fc87 --- /dev/null +++ b/tests/olmv1/olmv1_migrate_test.go @@ -0,0 +1,192 @@ +package olmv1_test + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + subscriptionGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "subscriptions", + } + csvGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "clusterserviceversions", + } + operatorGroupGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1", + Resource: "operatorgroups", + } +) + +var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo.Label("olmv1-migrate"), func() { + ctx := context.Background() + + ginkgo.BeforeAll(func() { + if !migrate { + ginkgo.Skip("Migration tests disabled (pass -migrate=true to enable)") + } + + ginkgo.By("Verifying OLMv0 resources exist before migration") + subs, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil || len(subs.Items) == 0 { + ginkgo.Skip(fmt.Sprintf("No OLMv0 Subscription found in %s — run 'make deploy-olm' first", namespace)) + } + for _, sub := range subs.Items { + log.Printf("Found OLMv0 Subscription: %s", sub.GetName()) + } + }) + + ginkgo.It("should remove OLMv0 Subscriptions", func() { + subs, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, sub := range subs.Items { + 
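// Removing the Subscription first means OLMv0 will not try to reinstall or
// upgrade the operator while the remaining OLMv0 resources are torn down.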
ginkgo.By(fmt.Sprintf("Deleting Subscription %s", sub.GetName())) + err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).Delete(ctx, sub.GetName(), metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + gomega.Eventually(func() int { + list, _ := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + return len(list.Items) + }, 1*time.Minute, 5*time.Second).Should(gomega.Equal(0)) + }) + + ginkgo.It("should remove OLMv0 CSVs", func() { + csvs, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, csv := range csvs.Items { + name := csv.GetName() + ginkgo.By(fmt.Sprintf("Deleting CSV %s", name)) + err := dynamicClient.Resource(csvGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + gomega.Eventually(func() int { + list, _ := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + return len(list.Items) + }, 2*time.Minute, 5*time.Second).Should(gomega.Equal(0)) + }) + + ginkgo.It("should remove OLMv0 OperatorGroup", func() { + ogs, err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, og := range ogs.Items { + ginkgo.By(fmt.Sprintf("Deleting OperatorGroup %s", og.GetName())) + err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).Delete(ctx, og.GetName(), metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + }) + + ginkgo.It("should clean orphaned OADP/Velero CRDs", func() { + ginkgo.By("Deleting orphaned CRDs that OLMv1 cannot adopt") + cleanupOrphanedCRDs(ctx) + }) + + ginkgo.It("should install OADP via OLMv1 ClusterExtension", func() { + ginkgo.By("Setting up installer ServiceAccount and RBAC") + ensureNamespace(ctx, namespace) + ensureServiceAccount(ctx, serviceAccountName, namespace) + ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + + if catalogImage != "" { + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s", catalogName)) + ensureClusterCatalog(ctx, catalogName, catalogImage) + waitForClusterCatalogServing(ctx, catalogName) + } + + ginkgo.By("Creating the ClusterExtension") + ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) + _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for ClusterExtension to be installed") + terminalReasons := map[string]bool{ + "InvalidConfiguration": true, + "Failed": true, + } + gomega.Eventually(func(g gomega.Gomega) { + obj, err := getClusterExtension(ctx, packageName) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + log.Print("Current conditions:") + logAllConditions(obj) + + progCond, progFound := getCondition(obj, "Progressing") + if progFound { + reason, _ := progCond["reason"].(string) + message, _ := progCond["message"].(string) + g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), + "ClusterExtension has terminal error: reason=%s message=%s", reason, message) + } + + instCond, instFound := getCondition(obj, "Installed") + g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") + status, _ := instCond["status"].(string) + 
g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") + }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) + + ginkgo.By("Checking installed bundle info") + obj, err := getClusterExtension(ctx, packageName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bundleName, bundleVersion, found := getInstalledBundle(obj) + gomega.Expect(found).To(gomega.BeTrue()) + log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) + }) + + ginkgo.It("should have controller-manager pod running after migration", func() { + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "control-plane=controller-manager", + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + log.Printf("Controller-manager pod %s is Running", pod.Name) + return true, nil + } + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running") + }) + + ginkgo.AfterAll(func() { + if !migrate { + return + } + ginkgo.By("Cleaning up migration test resources") + _ = deleteClusterExtension(ctx, packageName) + + gomega.Eventually(func() bool { + _, err := getClusterExtension(ctx, packageName) + return apierrors.IsNotFound(err) + }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) + + if createdCatalog { + deleteClusterCatalog(ctx, catalogName) + } + cleanupClusterRoleBinding(ctx, serviceAccountName) + }) +}) diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index 7494879baaf..e5e94e4b9ab 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -32,6 +32,7 @@ var ( catalogImage string serviceAccountName string artifactDir string + migrate bool createdCatalog bool @@ -59,8 +60,9 @@ func init() { flag.StringVar(&upgradeVersion, "upgrade-version", "", "Version to upgrade to (optional)") flag.StringVar(&catalogName, "catalog", "oadp-olmv1-test-catalog", "ClusterCatalog name to create or reference") flag.StringVar(&catalogImage, "catalog-image", "", "Catalog image to use for creating a ClusterCatalog (required when package is not in default catalogs)") - flag.StringVar(&serviceAccountName, "service-account", "oadp-olmv1-installer", "ServiceAccount name for ClusterExtension") + flag.StringVar(&serviceAccountName, "service-account", "oadp-operator-installer", "ServiceAccount name for ClusterExtension") flag.StringVar(&artifactDir, "artifact_dir", "/tmp", "Directory for test artifacts") + flag.BoolVar(&migrate, "migrate", false, "Run OLMv0-to-OLMv1 migration tests (expects pre-existing OLMv0 install)") } func TestOADPOLMv1(t *testing.T) { @@ -108,7 +110,7 @@ func ensureServiceAccount(ctx context.Context, name, ns string) { // ensureClusterAdminBinding grants cluster-admin to the installer SA. // This is intentionally broad for testing; production should use least-privilege RBAC. 
func ensureClusterAdminBinding(ctx context.Context, saName, ns string) { - bindingName := saName + "-cluster-admin" + bindingName := saName + "-binding" crb := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{Name: bindingName}, RoleRef: rbacv1.RoleRef{ @@ -257,7 +259,7 @@ func crdExists(ctx context.Context, name string) (bool, error) { } func cleanupClusterRoleBinding(ctx context.Context, saName string) { - bindingName := saName + "-cluster-admin" + bindingName := saName + "-binding" err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, bindingName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { log.Printf("Warning: failed to delete ClusterRoleBinding %s: %v", bindingName, err) From 82857562c6ff63280ed720f1c78ef31c01cbea14 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Thu, 7 May 2026 10:19:37 -0400 Subject: [PATCH 05/16] Add OLMv0 remnant cleanup and CatalogSource migration to OLMv1 tests OLMv1 cannot adopt resources created by OLMv0. Add cleanup step that deletes all olm.managed=true labeled resources (ServiceAccounts, Roles, RoleBindings, Deployments, ClusterRoles, ClusterRoleBindings) before ClusterExtension install. Also add CatalogSource image detection to migrate custom FBC catalogs to ClusterCatalog, and update Makefile migration target with matching phases. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- Makefile | 39 ++++++++- tests/olmv1/olmv1_migrate_test.go | 135 ++++++++++++++++++++++++++++-- 2 files changed, 162 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 0479ee7578c..de1a91cfc81 100644 --- a/Makefile +++ b/Makefile @@ -1115,7 +1115,17 @@ generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, .PHONY: upgrade-v0-to-olmv1 upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to OLMv1 (ClusterExtension). Requires OCP 4.20+. 
$(OC_CLI) whoami - @echo "=== Phase 1: Removing OLMv0 resources ===" + @echo "=== Phase 1: Capturing CatalogSource image for ClusterCatalog migration ===" + @# FBC image format is identical between OLMv0 CatalogSource and OLMv1 ClusterCatalog + @CATALOG_IMG=$${OLMV1_CATALOG_IMAGE:-$$($(OC_CLI) get catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) -o jsonpath='{.spec.image}' 2>/dev/null)}; \ + if [ -n "$$CATALOG_IMG" ]; then \ + echo "Captured catalog image: $$CATALOG_IMG"; \ + echo "$$CATALOG_IMG" > /tmp/oadp-migrate-catalog-image; \ + else \ + echo "No custom CatalogSource found — will use default ClusterCatalogs"; \ + rm -f /tmp/oadp-migrate-catalog-image; \ + fi + @echo "=== Phase 2: Removing OLMv0 resources ===" -$(OC_CLI) delete subscription oadp-operator -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true -$(OC_CLI) get subscription -n $(OADP_TEST_NAMESPACE) -o name 2>/dev/null | \ xargs -I {} sh -c '$(OC_CLI) get {} -n $(OADP_TEST_NAMESPACE) -o jsonpath='"'"'{.metadata.name}{"\t"}{.spec.source}{"\n"}'"'"' 2>/dev/null' | \ @@ -1126,14 +1136,35 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to xargs -I {} $(OC_CLI) delete {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true -$(OC_CLI) delete operatorgroup oadp-operator-group -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true -$(OC_CLI) delete catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) --ignore-not-found=true - @echo "=== Phase 2: Removing orphaned OADP/Velero CRDs ===" + @echo "=== Phase 3: Removing orphaned OADP/Velero CRDs ===" # OLMv1 cannot adopt CRDs it did not create -$(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io' | \ xargs -r $(OC_CLI) delete --ignore-not-found=true || true - @echo "=== Phase 3: Applying OLMv1 manifest ===" + @echo "=== Phase 4: Creating ClusterCatalog (if custom catalog was detected) ===" + @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ + CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ + echo "Creating ClusterCatalog $(OLMV1_CATALOG) from image $$CATALOG_IMG"; \ + printf '%s\n' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterCatalog' \ + 'metadata:' \ + ' name: $(OLMV1_CATALOG)' \ + 'spec:' \ + ' source:' \ + ' type: Image' \ + ' image:' \ + " ref: $$CATALOG_IMG" \ + | $(OC_CLI) apply -f -; \ + echo "Waiting for ClusterCatalog to be serving..."; \ + $(OC_CLI) wait clustercatalog/$(OLMV1_CATALOG) --for=condition=Serving=True --timeout=120s; \ + rm -f /tmp/oadp-migrate-catalog-image; \ + else \ + echo "Skipping — no custom catalog to migrate"; \ + fi + @echo "=== Phase 5: Applying OLMv1 manifest ===" $(MAKE) generate-olmv1-manifest $(OC_CLI) apply -f $(OLMV1_MANIFEST) - @echo "=== Phase 4: Waiting for ClusterExtension Installed=True ===" + @echo "=== Phase 6: Waiting for ClusterExtension Installed=True ===" $(OC_CLI) wait clusterextension/$(OLMV1_PACKAGE) \ --for=condition=Installed=True --timeout=600s @echo "Migration complete." 
diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index f5fc135fc87..b9351aa66a7 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -11,6 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -30,6 +31,13 @@ var ( Version: "v1", Resource: "operatorgroups", } + catalogSourceGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "catalogsources", + } + + migratedCatalogImage string ) var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo.Label("olmv1-migrate"), func() { @@ -48,6 +56,17 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. for _, sub := range subs.Items { log.Printf("Found OLMv0 Subscription: %s", sub.GetName()) } + + ginkgo.By("Capturing CatalogSource image for ClusterCatalog migration") + migratedCatalogImage = catalogImage + if migratedCatalogImage == "" { + migratedCatalogImage = detectCatalogSourceImage(ctx, subs.Items) + } + if migratedCatalogImage != "" { + log.Printf("Will create ClusterCatalog from CatalogSource image: %s", migratedCatalogImage) + } else { + log.Print("No custom CatalogSource detected — will rely on default ClusterCatalogs") + } }) ginkgo.It("should remove OLMv0 Subscriptions", func() { @@ -85,10 +104,9 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. }, 2*time.Minute, 5*time.Second).Should(gomega.Equal(0)) }) - ginkgo.It("should remove OLMv0 OperatorGroup", func() { + ginkgo.It("should remove OLMv0 OperatorGroup and CatalogSource", func() { ogs, err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - for _, og := range ogs.Items { ginkgo.By(fmt.Sprintf("Deleting OperatorGroup %s", og.GetName())) err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).Delete(ctx, og.GetName(), metav1.DeleteOptions{}) @@ -96,6 +114,19 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } + + ginkgo.By("Deleting custom CatalogSources (preserving defaults)") + csList, err := dynamicClient.Resource(catalogSourceGVR).Namespace("openshift-marketplace").List(ctx, metav1.ListOptions{}) + if err == nil { + for _, cs := range csList.Items { + name := cs.GetName() + if isDefaultCatalogSource(name) { + continue + } + ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s", name)) + _ = dynamicClient.Resource(catalogSourceGVR).Namespace("openshift-marketplace").Delete(ctx, name, metav1.DeleteOptions{}) + } + } }) ginkgo.It("should clean orphaned OADP/Velero CRDs", func() { @@ -103,18 +134,71 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
cleanupOrphanedCRDs(ctx) }) + ginkgo.It("should clean OLMv0 remnant resources that OLMv1 cannot adopt", func() { + olmSelector := metav1.ListOptions{LabelSelector: "olm.managed=true"} + + ginkgo.By("Deleting OLMv0-managed namespace-scoped resources") + sas, _ := kubeClient.CoreV1().ServiceAccounts(namespace).List(ctx, olmSelector) + if sas != nil { + for _, sa := range sas.Items { + log.Printf("Deleting remnant ServiceAccount %s/%s", namespace, sa.Name) + _ = kubeClient.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}) + } + } + roles, _ := kubeClient.RbacV1().Roles(namespace).List(ctx, olmSelector) + if roles != nil { + for _, r := range roles.Items { + log.Printf("Deleting remnant Role %s/%s", namespace, r.Name) + _ = kubeClient.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}) + } + } + rbs, _ := kubeClient.RbacV1().RoleBindings(namespace).List(ctx, olmSelector) + if rbs != nil { + for _, rb := range rbs.Items { + log.Printf("Deleting remnant RoleBinding %s/%s", namespace, rb.Name) + _ = kubeClient.RbacV1().RoleBindings(namespace).Delete(ctx, rb.Name, metav1.DeleteOptions{}) + } + } + deploys, _ := kubeClient.AppsV1().Deployments(namespace).List(ctx, olmSelector) + if deploys != nil { + for _, d := range deploys.Items { + log.Printf("Deleting remnant Deployment %s/%s", namespace, d.Name) + _ = kubeClient.AppsV1().Deployments(namespace).Delete(ctx, d.Name, metav1.DeleteOptions{}) + } + } + + ginkgo.By("Deleting OLMv0-managed cluster-scoped resources") + crs, _ := kubeClient.RbacV1().ClusterRoles().List(ctx, olmSelector) + if crs != nil { + for _, cr := range crs.Items { + log.Printf("Deleting remnant ClusterRole %s", cr.Name) + _ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) + } + } + crbs, _ := kubeClient.RbacV1().ClusterRoleBindings().List(ctx, olmSelector) + if crbs != nil { + for _, crb := range crbs.Items { + log.Printf("Deleting remnant ClusterRoleBinding %s", crb.Name) + _ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}) + } + } + }) + + ginkgo.It("should create ClusterCatalog from migrated CatalogSource image", func() { + if migratedCatalogImage == "" { + ginkgo.Skip("No custom catalog image to migrate — using default ClusterCatalogs") + } + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, migratedCatalogImage)) + ensureClusterCatalog(ctx, catalogName, migratedCatalogImage) + waitForClusterCatalogServing(ctx, catalogName) + }) + ginkgo.It("should install OADP via OLMv1 ClusterExtension", func() { ginkgo.By("Setting up installer ServiceAccount and RBAC") ensureNamespace(ctx, namespace) ensureServiceAccount(ctx, serviceAccountName, namespace) ensureClusterAdminBinding(ctx, serviceAccountName, namespace) - if catalogImage != "" { - ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s", catalogName)) - ensureClusterCatalog(ctx, catalogName, catalogImage) - waitForClusterCatalogServing(ctx, catalogName) - } - ginkgo.By("Creating the ClusterExtension") ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) @@ -190,3 +274,38 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. cleanupClusterRoleBinding(ctx, serviceAccountName) }) }) + +// detectCatalogSourceImage finds the catalog image from the OLMv0 Subscription's +// CatalogSource. 
This allows migrating a custom catalog to a ClusterCatalog +// using the same FBC image (same format, different API). +func detectCatalogSourceImage(ctx context.Context, subs []unstructured.Unstructured) string { + for _, sub := range subs { + source, _, _ := unstructured.NestedString(sub.Object, "spec", "source") + sourceNS, _, _ := unstructured.NestedString(sub.Object, "spec", "sourceNamespace") + if source == "" || sourceNS == "" { + continue + } + if isDefaultCatalogSource(source) { + continue + } + cs, err := dynamicClient.Resource(catalogSourceGVR).Namespace(sourceNS).Get(ctx, source, metav1.GetOptions{}) + if err != nil { + log.Printf("Warning: CatalogSource %s/%s not found: %v", sourceNS, source, err) + continue + } + image, _, _ := unstructured.NestedString(cs.Object, "spec", "image") + if image != "" { + log.Printf("Detected CatalogSource %s/%s image: %s", sourceNS, source, image) + return image + } + } + return "" +} + +func isDefaultCatalogSource(name string) bool { + switch name { + case "redhat-operators", "certified-operators", "community-operators", "redhat-marketplace": + return true + } + return false +} From 523408089c7cea75fed9b7278b59964509b60e29 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Thu, 7 May 2026 10:26:56 -0400 Subject: [PATCH 06/16] Add olm.managed=true cleanup to Makefile migration target The upgrade-v0-to-olmv1 Makefile target was missing the OLMv0 remnant cleanup step. OLMv1 refuses to install when it finds pre-existing resources labeled olm.managed=true (ServiceAccounts, Roles, ClusterRoles, etc). Add Phase 3b to delete these before ClusterExtension creation. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index de1a91cfc81..b10aec89f13 100644 --- a/Makefile +++ b/Makefile @@ -1140,6 +1140,10 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to # OLMv1 cannot adopt CRDs it did not create -$(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io' | \ xargs -r $(OC_CLI) delete --ignore-not-found=true || true + @echo "=== Phase 3b: Removing OLMv0-managed remnant resources ===" + # OLMv1 cannot adopt resources created by OLMv0's CSV (labeled olm.managed=true) + -$(OC_CLI) delete sa,roles,rolebindings,deployments -l olm.managed=true -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + -$(OC_CLI) delete clusterroles,clusterrolebindings -l olm.managed=true --ignore-not-found=true || true @echo "=== Phase 4: Creating ClusterCatalog (if custom catalog was detected) ===" @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ From f9cfb45e0c870b8bfdc7d78812103c95aa5e855d Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Thu, 7 May 2026 11:16:42 -0400 Subject: [PATCH 07/16] Add CI compatibility, version verification, and docs for OLMv1 migration test - Fix CatalogSource cleanup to scan both openshift-marketplace and operator namespace (CI places CatalogSource in operator namespace via optional-operators-subscribe) - Add OLMv0 CSV version capture before migration and verify same version installed after OLMv1 ClusterExtension install - Verify installed bundle came from expected catalog, not community default - Pin ClusterExtension to specific ClusterCatalog via selector.matchLabels - Build fresh operator/bundle/catalog images in 
upgrade-v0-to-olmv1 to avoid expired ttl.sh images - Add verbose diagnostics for ClusterCatalog failures (image ref, catalogd logs) - Append test-upgrade-v0-to-olmv1 to test-e2e target for Prow presubmit - Add usage comments for local and CI/Prow workflows Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- Makefile | 119 +++++++++++++++++++++++++++--- tests/olmv1/olmv1_migrate_test.go | 103 +++++++++++++++++++++----- tests/olmv1/olmv1_suite_test.go | 23 +++++- 3 files changed, 209 insertions(+), 36 deletions(-) diff --git a/Makefile b/Makefile index b10aec89f13..1e27c9810a6 100644 --- a/Makefile +++ b/Makefile @@ -999,7 +999,25 @@ test-e2e: test-e2e-setup install-ginkgo ## Run E2E tests against OADP operator i fi; \ ./tests/e2e/scripts/analyze_failures.sh $${EXIT_CODE:-0}; \ fi; \ - exit $${EXIT_CODE:-0} + echo $${EXIT_CODE:-0} > /tmp/oadp-e2e-exit-code + # OLMv0→OLMv1 migration test runs after e2e. It removes the OLMv0 install + # (Subscription, CSV, CRDs) and reinstalls via OLMv1 ClusterExtension. + # + # CI: Prow's optional-operators-subscribe already created the OLMv0 install. + # The test auto-detects the CatalogSource image from the Subscription. + # Local: Requires OLMv0 install first (make deploy-olm). The test auto-detects + # the CatalogSource, but if you used a custom catalog you may need to set + # OLMV1_CATALOG_IMAGE. If this step fails locally, check: + # - OCP 4.20+ is required (OLMv1 APIs must exist) + # - ttl.sh catalog images expire after TTL_DURATION (default 1h) + # - Run "make test-upgrade-v0-to-olmv1" standalone to iterate on failures + # + # Migration failure does not mask the e2e exit code. Migration results are + # captured separately in junit_olmv1_report.xml. + -$(MAKE) test-upgrade-v0-to-olmv1 + @E2E_EXIT=$$(cat /tmp/oadp-e2e-exit-code 2>/dev/null || echo 0); \ + rm -f /tmp/oadp-e2e-exit-code; \ + exit $$E2E_EXIT .PHONY: test-e2e-cleanup test-e2e-cleanup: login-required @@ -1018,6 +1036,40 @@ test-e2e-cleanup: login-required rm -rf $(SETTINGS_TMP) ##@ OLMv1 Tests +# +# OLMv1 migration and lifecycle tests validate installing OADP via OLMv1 +# ClusterExtension, including migrating from an existing OLMv0 (Subscription/CSV) +# install. Two complementary approaches exist: +# +# Makefile targets (upgrade-v0-to-olmv1): +# Shell-based migration using oc commands. Useful for local development and +# quick iteration. Builds fresh operator/bundle/catalog images, removes OLMv0 +# resources, creates a ClusterCatalog, and installs via ClusterExtension. +# +# Go tests (test-upgrade-v0-to-olmv1): +# Ginkgo test suite that performs the same migration with detailed assertions, +# version verification, and structured JUnit output for CI reporting. +# +# Local usage: +# make deploy-olm # Install OADP via OLMv0 first +# make upgrade-v0-to-olmv1 # Shell-based migration (builds fresh images) +# # or +# make test-upgrade-v0-to-olmv1 # Go test migration (needs catalog-image or +# # pre-existing CatalogSource to auto-detect) +# +# CI/Prow usage (presubmit with optional-operators-subscribe workflow): +# 1. ci-operator builds the test image from build/ci-Dockerfile +# 2. optional-operators-subscribe creates a CatalogSource (from OO_INDEX) + +# OperatorGroup + Subscription in OO_INSTALL_NAMESPACE (openshift-adp), +# giving us a running OLMv0 install +# 3. The test step runs: make test-upgrade-v0-to-olmv1 +# 4. 
The Go test auto-detects the CatalogSource image from the Subscription, +# removes all OLMv0 resources, creates a ClusterCatalog from the same image, +# and installs via ClusterExtension +# 5. Version is verified: OLMv0 CSV version must match OLMv1 installed version +# +# The Go test is compatible with any CatalogSource placement (openshift-marketplace +# or operator namespace) — it reads spec.sourceNamespace from the Subscription. OLMV1_PACKAGE ?= oadp-operator OLMV1_NAMESPACE ?= $(OADP_TEST_NAMESPACE) @@ -1104,6 +1156,11 @@ generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, ' catalog:' \ ' packageName: $(OLMV1_PACKAGE)' \ > $(OLMV1_MANIFEST) + @if [ -n "$(OLMV1_PIN_CATALOG)" ]; then \ + printf ' selector:\n' >> $(OLMV1_MANIFEST); \ + printf ' matchLabels:\n' >> $(OLMV1_MANIFEST); \ + printf ' olm.operatorframework.io/metadata.name: %s\n' '$(OLMV1_PIN_CATALOG)' >> $(OLMV1_MANIFEST); \ + fi @if [ -n "$(OLMV1_CHANNEL)" ]; then \ printf ' channel: %s\n' '$(OLMV1_CHANNEL)' >> $(OLMV1_MANIFEST); \ fi @@ -1112,18 +1169,32 @@ generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, fi @echo "Generated $(OLMV1_MANIFEST)" +# upgrade-v0-to-olmv1: Shell-based OLMv0→OLMv1 migration for local development. +# Builds fresh operator+bundle+catalog images (ttl.sh), removes OLMv0 resources, +# creates ClusterCatalog, and installs via ClusterExtension. +# Skip the build by passing OLMV1_CATALOG_IMAGE=. +# Usage: make deploy-olm && make upgrade-v0-to-olmv1 .PHONY: upgrade-v0-to-olmv1 +upgrade-v0-to-olmv1: UPGRADE_OPERATOR_IMAGE?=ttl.sh/oadp-operator-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_BUNDLE_IMAGE?=ttl.sh/oadp-operator-bundle-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_CATALOG_IMAGE?=ttl.sh/oadp-operator-catalog-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_TMP:=$(shell mktemp -d)/ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to OLMv1 (ClusterExtension). Requires OCP 4.20+. $(OC_CLI) whoami - @echo "=== Phase 1: Capturing CatalogSource image for ClusterCatalog migration ===" - @# FBC image format is identical between OLMv0 CatalogSource and OLMv1 ClusterCatalog - @CATALOG_IMG=$${OLMV1_CATALOG_IMAGE:-$$($(OC_CLI) get catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) -o jsonpath='{.spec.image}' 2>/dev/null)}; \ - if [ -n "$$CATALOG_IMG" ]; then \ - echo "Captured catalog image: $$CATALOG_IMG"; \ - echo "$$CATALOG_IMG" > /tmp/oadp-migrate-catalog-image; \ + @echo "=== Phase 1: Building fresh catalog image ===" + @if [ -n "$(OLMV1_CATALOG_IMAGE)" ]; then \ + echo "Using provided catalog image: $(OLMV1_CATALOG_IMAGE)"; \ + echo "$(OLMV1_CATALOG_IMAGE)" > /tmp/oadp-migrate-catalog-image; \ else \ - echo "No custom CatalogSource found — will use default ClusterCatalogs"; \ - rm -f /tmp/oadp-migrate-catalog-image; \ + echo "Building operator, bundle, and catalog images (avoids expired ttl.sh images)..."; \ + echo " Operator: $(UPGRADE_OPERATOR_IMAGE)"; \ + echo " Bundle: $(UPGRADE_BUNDLE_IMAGE)"; \ + echo " Catalog: $(UPGRADE_CATALOG_IMAGE)"; \ + cp -r . 
$(UPGRADE_TMP) && cd $(UPGRADE_TMP) && \ + IMG=$(UPGRADE_OPERATOR_IMAGE) BUNDLE_IMG=$(UPGRADE_BUNDLE_IMAGE) BUNDLE_IMGS=$(UPGRADE_BUNDLE_IMAGE) CATALOG_IMG=$(UPGRADE_CATALOG_IMAGE) \ + make docker-build docker-push bundle bundle-build bundle-push catalog-build catalog-push; \ + chmod -R 777 $(UPGRADE_TMP) && rm -rf $(UPGRADE_TMP); \ + echo "$(UPGRADE_CATALOG_IMAGE)" > /tmp/oadp-migrate-catalog-image; \ fi @echo "=== Phase 2: Removing OLMv0 resources ===" -$(OC_CLI) delete subscription oadp-operator -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true @@ -1136,6 +1207,8 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to xargs -I {} $(OC_CLI) delete {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true -$(OC_CLI) delete operatorgroup oadp-operator-group -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true -$(OC_CLI) delete catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) --ignore-not-found=true + # CI (optional-operators-subscribe) may place CatalogSource in the operator namespace + -$(OC_CLI) delete catalogsource --all -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true @echo "=== Phase 3: Removing orphaned OADP/Velero CRDs ===" # OLMv1 cannot adopt CRDs it did not create -$(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io' | \ @@ -1144,7 +1217,7 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to # OLMv1 cannot adopt resources created by OLMv0's CSV (labeled olm.managed=true) -$(OC_CLI) delete sa,roles,rolebindings,deployments -l olm.managed=true -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true -$(OC_CLI) delete clusterroles,clusterrolebindings -l olm.managed=true --ignore-not-found=true || true - @echo "=== Phase 4: Creating ClusterCatalog (if custom catalog was detected) ===" + @echo "=== Phase 4: Creating ClusterCatalog ===" @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ echo "Creating ClusterCatalog $(OLMV1_CATALOG) from image $$CATALOG_IMG"; \ @@ -1160,13 +1233,27 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to " ref: $$CATALOG_IMG" \ | $(OC_CLI) apply -f -; \ echo "Waiting for ClusterCatalog to be serving..."; \ - $(OC_CLI) wait clustercatalog/$(OLMV1_CATALOG) --for=condition=Serving=True --timeout=120s; \ + if ! 
$(OC_CLI) wait clustercatalog/$(OLMV1_CATALOG) --for=condition=Serving=True --timeout=120s; then \ + echo ""; \ + echo "ERROR: ClusterCatalog $(OLMV1_CATALOG) failed to reach Serving state."; \ + echo "Catalog image: $$CATALOG_IMG"; \ + echo "This usually means the catalog image is expired or cannot be pulled."; \ + echo "If using ttl.sh, images expire after TTL_DURATION (default: 1h)."; \ + echo ""; \ + echo "=== ClusterCatalog status ==="; \ + $(OC_CLI) get clustercatalog $(OLMV1_CATALOG) -o yaml 2>/dev/null || true; \ + echo "=== catalogd pod logs (last 30 lines) ==="; \ + $(OC_CLI) logs -n openshift-catalogd -l app.kubernetes.io/name=catalogd --tail=30 2>/dev/null || true; \ + rm -f /tmp/oadp-migrate-catalog-image; \ + exit 1; \ + fi; \ rm -f /tmp/oadp-migrate-catalog-image; \ else \ echo "Skipping — no custom catalog to migrate"; \ fi @echo "=== Phase 5: Applying OLMv1 manifest ===" - $(MAKE) generate-olmv1-manifest + -$(OC_CLI) delete clusterextension $(OLMV1_PACKAGE) --ignore-not-found=true + $(MAKE) generate-olmv1-manifest OLMV1_PIN_CATALOG=$(OLMV1_CATALOG) $(OC_CLI) apply -f $(OLMV1_MANIFEST) @echo "=== Phase 6: Waiting for ClusterExtension Installed=True ===" $(OC_CLI) wait clusterextension/$(OLMV1_PACKAGE) \ @@ -1174,6 +1261,14 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to @echo "Migration complete." $(OC_CLI) get clusterextension $(OLMV1_PACKAGE) +# test-upgrade-v0-to-olmv1: Ginkgo-based OLMv0→OLMv1 migration test with assertions. +# Expects a pre-existing OLMv0 install (Subscription + CSV running). +# +# Local: make deploy-olm && make test-upgrade-v0-to-olmv1 OLMV1_CATALOG_IMAGE= +# CI: Prow's optional-operators-subscribe installs OLMv0 first, then this target +# runs. The test auto-detects the CatalogSource image from the Subscription +# (no OLMV1_CATALOG_IMAGE needed — works with CI-created CatalogSource in any +# namespace). Verifies same version installed before/after migration. .PHONY: test-upgrade-v0-to-olmv1 test-upgrade-v0-to-olmv1: login-required install-ginkgo ## Test OLMv0->OLMv1 migration path. Expects a pre-existing OLMv0 OADP install (run make deploy-olm first). ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) \ diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index b9351aa66a7..f52d621bbf6 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -1,3 +1,23 @@ +// OLMv0→OLMv1 migration test: validates migrating a running OLMv0 OADP install +// (Subscription + CSV) to OLMv1 (ClusterExtension). +// +// Run locally: +// make deploy-olm # install via OLMv0 +// make test-upgrade-v0-to-olmv1 OLMV1_CATALOG_IMAGE= # run migration test +// +// Run in CI (Prow presubmit with optional-operators-subscribe workflow): +// Prow creates CatalogSource + Subscription in openshift-adp from the CI-built +// index image. This test auto-detects the CatalogSource image from the Subscription's +// spec.source/spec.sourceNamespace, so no OLMV1_CATALOG_IMAGE is needed. +// +// The test: +// 1. Captures OLMv0 CSV version before migration +// 2. Removes Subscriptions, CSVs, OperatorGroup, CatalogSources, orphaned CRDs, +// and olm.managed=true remnant resources +// 3. Creates a ClusterCatalog from the detected (or provided) catalog image +// 4. Installs OADP via OLMv1 ClusterExtension +// 5. Verifies the installed version matches the pre-migration OLMv0 version +// 6. 
Verifies the bundle came from the expected catalog (not a default/community one) package olmv1_test import ( @@ -37,7 +57,10 @@ var ( Resource: "catalogsources", } - migratedCatalogImage string + migratedCatalogImage string + migratedCatalogSourceName string + migratedCatalogSourceNS string + olmv0InstalledVersion string ) var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo.Label("olmv1-migrate"), func() { @@ -57,13 +80,29 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. log.Printf("Found OLMv0 Subscription: %s", sub.GetName()) } - ginkgo.By("Capturing CatalogSource image for ClusterCatalog migration") + ginkgo.By("Capturing OLMv0 installed version from CSV") + csvs, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err == nil { + for _, csv := range csvs.Items { + csvVersion, _, _ := unstructured.NestedString(csv.Object, "spec", "version") + if csvVersion != "" { + olmv0InstalledVersion = csvVersion + log.Printf("OLMv0 installed CSV: %s version: %s", csv.GetName(), csvVersion) + break + } + } + } + + ginkgo.By("Capturing CatalogSource for ClusterCatalog migration") migratedCatalogImage = catalogImage if migratedCatalogImage == "" { - migratedCatalogImage = detectCatalogSourceImage(ctx, subs.Items) + migratedCatalogImage, migratedCatalogSourceName, migratedCatalogSourceNS = detectCatalogSource(ctx, subs.Items) } if migratedCatalogImage != "" { log.Printf("Will create ClusterCatalog from CatalogSource image: %s", migratedCatalogImage) + if migratedCatalogSourceName != "" { + log.Printf("CatalogSource to clean up: %s/%s", migratedCatalogSourceNS, migratedCatalogSourceName) + } } else { log.Print("No custom CatalogSource detected — will rely on default ClusterCatalogs") } @@ -115,16 +154,23 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. } } - ginkgo.By("Deleting custom CatalogSources (preserving defaults)") - csList, err := dynamicClient.Resource(catalogSourceGVR).Namespace("openshift-marketplace").List(ctx, metav1.ListOptions{}) - if err == nil { + ginkgo.By("Deleting CatalogSources used by the operator") + if migratedCatalogSourceName != "" && migratedCatalogSourceNS != "" { + ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s/%s (detected from Subscription)", migratedCatalogSourceNS, migratedCatalogSourceName)) + _ = dynamicClient.Resource(catalogSourceGVR).Namespace(migratedCatalogSourceNS).Delete(ctx, migratedCatalogSourceName, metav1.DeleteOptions{}) + } + for _, csNS := range []string{"openshift-marketplace", namespace} { + csList, err := dynamicClient.Resource(catalogSourceGVR).Namespace(csNS).List(ctx, metav1.ListOptions{}) + if err != nil { + continue + } for _, cs := range csList.Items { name := cs.GetName() if isDefaultCatalogSource(name) { continue } - ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s", name)) - _ = dynamicClient.Resource(catalogSourceGVR).Namespace("openshift-marketplace").Delete(ctx, name, metav1.DeleteOptions{}) + ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s/%s", csNS, name)) + _ = dynamicClient.Resource(catalogSourceGVR).Namespace(csNS).Delete(ctx, name, metav1.DeleteOptions{}) } } }) @@ -230,12 +276,27 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) - ginkgo.By("Checking installed bundle info") + ginkgo.By("Verifying installed bundle version and catalog source") obj, err := getClusterExtension(ctx, packageName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) bundleName, bundleVersion, found := getInstalledBundle(obj) - gomega.Expect(found).To(gomega.BeTrue()) - log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) + gomega.Expect(found).To(gomega.BeTrue(), "ClusterExtension should have an installed bundle") + log.Printf("OLMv1 installed bundle: name=%s version=%s", bundleName, bundleVersion) + + if olmv0InstalledVersion != "" { + log.Printf("Version check: OLMv0=%s OLMv1=%s", olmv0InstalledVersion, bundleVersion) + gomega.Expect(bundleVersion).To(gomega.Equal(olmv0InstalledVersion), + "OLMv1 installed version should match OLMv0 version (was %s, got %s)", olmv0InstalledVersion, bundleVersion) + } + + if migratedCatalogImage != "" { + instCond, instFound := getCondition(obj, "Installed") + gomega.Expect(instFound).To(gomega.BeTrue()) + installedMsg, _ := instCond["message"].(string) + log.Printf("Installed condition message: %s", installedMsg) + gomega.Expect(installedMsg).NotTo(gomega.ContainSubstring("openshift-community-operators"), + "Bundle should NOT come from community catalog — expected custom catalog %s", catalogName) + } }) ginkgo.It("should have controller-manager pod running after migration", func() { @@ -275,10 +336,12 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. }) }) -// detectCatalogSourceImage finds the catalog image from the OLMv0 Subscription's -// CatalogSource. This allows migrating a custom catalog to a ClusterCatalog -// using the same FBC image (same format, different API). -func detectCatalogSourceImage(ctx context.Context, subs []unstructured.Unstructured) string { +// detectCatalogSource finds the custom CatalogSource from the OLMv0 Subscription. +// Returns the image, CatalogSource name, and namespace. The image can be used +// to create a ClusterCatalog (same FBC format, different API). The name and +// namespace are needed for cleanup since CI (optional-operators-subscribe) may +// place the CatalogSource in the operator namespace, not openshift-marketplace. 
+func detectCatalogSource(ctx context.Context, subs []unstructured.Unstructured) (image, name, ns string) { for _, sub := range subs { source, _, _ := unstructured.NestedString(sub.Object, "spec", "source") sourceNS, _, _ := unstructured.NestedString(sub.Object, "spec", "sourceNamespace") @@ -293,13 +356,13 @@ func detectCatalogSourceImage(ctx context.Context, subs []unstructured.Unstructu log.Printf("Warning: CatalogSource %s/%s not found: %v", sourceNS, source, err) continue } - image, _, _ := unstructured.NestedString(cs.Object, "spec", "image") - if image != "" { - log.Printf("Detected CatalogSource %s/%s image: %s", sourceNS, source, image) - return image + img, _, _ := unstructured.NestedString(cs.Object, "spec", "image") + if img != "" { + log.Printf("Detected CatalogSource %s/%s image: %s", sourceNS, source, img) + return img, source, sourceNS } } - return "" + return "", "", "" } func isDefaultCatalogSource(name string) bool { diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index e5e94e4b9ab..f6b97ff109f 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -340,15 +340,30 @@ func waitForClusterCatalogServing(ctx context.Context, name string) { if !ok { continue } - if cond["type"] == "Serving" { - status, _ := cond["status"].(string) - reason, _ := cond["reason"].(string) + condType, _ := cond["type"].(string) + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + switch condType { + case "Serving": log.Printf("ClusterCatalog %s Serving: status=%s reason=%s", name, status, reason) + if status != "True" && message != "" { + log.Printf(" message: %s", message) + } return status == "True" + case "Progressing": + if reason == "Failed" || status == "False" { + imageRef, _, _ := unstructured.NestedString(obj.Object, "spec", "source", "image", "ref") + log.Printf("ClusterCatalog %s Progressing: status=%s reason=%s (image: %s)", name, status, reason, imageRef) + if message != "" { + log.Printf(" message: %s", message) + } + } } } return false - }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "ClusterCatalog should be Serving") + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), + "ClusterCatalog %s should be Serving — if using ttl.sh, the catalog image may have expired", name) } func deleteClusterCatalog(ctx context.Context, name string) { From 9de0e762c630daa8d7e8245f1ed1c986fcc0a23b Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Thu, 7 May 2026 11:18:15 -0400 Subject: [PATCH 08/16] Add generated OLMv1 manifest to .gitignore Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 05921057c15..eb346d683de 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,8 @@ go.work *~ # OADP +# Generated by `make generate-olmv1-manifest` — regenerated each run with current config +oadp-olmv1-manifest.yaml tests/e2e/e2e.test tests/e2e/templates/*.yaml .DS_Store From d30d922130d88c47a448203810b20e051649b4d1 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Thu, 7 May 2026 13:56:30 -0400 Subject: [PATCH 09/16] Address review feedback: fix manifest schema, scope cleanup, add mirror catalog target MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix generate-olmv1-manifest: channel (singular) → channels 
(array) per ClusterExtension API schema - Scope olm.managed=true cluster-wide RBAC cleanup to OADP-related resources only, avoiding deletion of other operators' resources in shared clusters - Replace Update with MergePatch for ClusterExtension upgrade to avoid 409 Conflict races with the controller - Validate existing ClusterCatalog image on AlreadyExists instead of silently returning - Add deploy-olmv1-mirror-catalog target for testing with productized index images when current OCP version lacks redhat-oadp-operator (e.g., 4.22) Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- Makefile | 47 +++++++++++++++++++++++++++++-- tests/olmv1/olmv1_install_test.go | 45 ++--------------------------- tests/olmv1/olmv1_migrate_test.go | 15 +++++++++- tests/olmv1/olmv1_suite_test.go | 8 +++++- 4 files changed, 69 insertions(+), 46 deletions(-) diff --git a/Makefile b/Makefile index 1e27c9810a6..f7e5adc556e 100644 --- a/Makefile +++ b/Makefile @@ -1109,6 +1109,44 @@ test-olmv1-cleanup: login-required ## Cleanup resources created by OLMv1 tests. $(OC_CLI) delete clusterrolebinding $(OLMV1_INSTALLER_BINDING) --ignore-not-found=true $(OC_CLI) delete sa $(OLMV1_SERVICE_ACCOUNT) -n $(OLMV1_NAMESPACE) --ignore-not-found=true +# deploy-olmv1-mirror-catalog: Deploy a ClusterCatalog from a productized index +# image for testing when the current OCP version's redhat-operator-index does +# not include redhat-oadp-operator (e.g., 4.22 only ships OLMv1-curated packages). +# +# Usage: +# make deploy-olmv1-mirror-catalog # defaults to v4.21 +# make deploy-olmv1-mirror-catalog OLMV1_MIRROR_INDEX=registry.redhat.io/redhat/redhat-operator-index:v4.20 +# make deploy-olmv1-mirror-catalog OLMV1_MIRROR_PACKAGE=redhat-oadp-operator +OLMV1_MIRROR_INDEX ?= registry.redhat.io/redhat/redhat-operator-index:v4.21 +OLMV1_MIRROR_CATALOG ?= oadp-v0-mirror-test-catalog +OLMV1_MIRROR_PACKAGE ?= redhat-oadp-operator + +.PHONY: deploy-olmv1-mirror-catalog +deploy-olmv1-mirror-catalog: login-required ## Deploy a ClusterCatalog from a productized index image for OLMv1 testing. + @echo "=== Deploying mirror ClusterCatalog from $(OLMV1_MIRROR_INDEX) ===" + @printf '%s\n' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterCatalog' \ + 'metadata:' \ + ' name: $(OLMV1_MIRROR_CATALOG)' \ + 'spec:' \ + ' source:' \ + ' type: Image' \ + ' image:' \ + ' ref: $(OLMV1_MIRROR_INDEX)' \ + | $(OC_CLI) apply -f - + @echo "Waiting for ClusterCatalog $(OLMV1_MIRROR_CATALOG) to be serving..." + $(OC_CLI) wait clustercatalog/$(OLMV1_MIRROR_CATALOG) --for=condition=Serving=True --timeout=300s + @echo "" + @echo "Mirror catalog ready. Install $(OLMV1_MIRROR_PACKAGE) via OLMv1:" + @echo " make test-olmv1 OLMV1_PACKAGE=$(OLMV1_MIRROR_PACKAGE) OLMV1_CATALOG=$(OLMV1_MIRROR_CATALOG) OLMV1_CATALOG_IMAGE=$(OLMV1_MIRROR_INDEX)" + @echo " # or for migration test:" + @echo " make test-upgrade-v0-to-olmv1 OLMV1_PACKAGE=$(OLMV1_MIRROR_PACKAGE) OLMV1_CATALOG=$(OLMV1_MIRROR_CATALOG) OLMV1_CATALOG_IMAGE=$(OLMV1_MIRROR_INDEX)" + +.PHONY: undeploy-olmv1-mirror-catalog +undeploy-olmv1-mirror-catalog: login-required ## Remove the mirror ClusterCatalog. 
+ $(OC_CLI) delete clustercatalog $(OLMV1_MIRROR_CATALOG) --ignore-not-found=true + OLMV1_MANIFEST ?= oadp-olmv1-manifest.yaml .PHONY: generate-olmv1-manifest @@ -1162,7 +1200,8 @@ generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, printf ' olm.operatorframework.io/metadata.name: %s\n' '$(OLMV1_PIN_CATALOG)' >> $(OLMV1_MANIFEST); \ fi @if [ -n "$(OLMV1_CHANNEL)" ]; then \ - printf ' channel: %s\n' '$(OLMV1_CHANNEL)' >> $(OLMV1_MANIFEST); \ + printf ' channels:\n' >> $(OLMV1_MANIFEST); \ + printf ' - %s\n' '$(OLMV1_CHANNEL)' >> $(OLMV1_MANIFEST); \ fi @if [ -n "$(OLMV1_VERSION)" ]; then \ printf ' version: "%s"\n' '$(OLMV1_VERSION)' >> $(OLMV1_MANIFEST); \ @@ -1216,7 +1255,11 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to @echo "=== Phase 3b: Removing OLMv0-managed remnant resources ===" # OLMv1 cannot adopt resources created by OLMv0's CSV (labeled olm.managed=true) -$(OC_CLI) delete sa,roles,rolebindings,deployments -l olm.managed=true -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true - -$(OC_CLI) delete clusterroles,clusterrolebindings -l olm.managed=true --ignore-not-found=true || true + # Only delete cluster-scoped resources related to OADP (avoid breaking other operators in shared clusters) + -$(OC_CLI) get clusterroles -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)' | \ + xargs -r $(OC_CLI) delete --ignore-not-found=true || true + -$(OC_CLI) get clusterrolebindings -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)' | \ + xargs -r $(OC_CLI) delete --ignore-not-found=true || true @echo "=== Phase 4: Creating ClusterCatalog ===" @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ diff --git a/tests/olmv1/olmv1_install_test.go b/tests/olmv1/olmv1_install_test.go index e5671f740e1..8fe780dfb59 100644 --- a/tests/olmv1/olmv1_install_test.go +++ b/tests/olmv1/olmv1_install_test.go @@ -11,6 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) const ( @@ -170,14 +171,8 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol previousBundleName, previousVersion, _ := getInstalledBundle(obj) log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) - catalogSpec, _, _ := unstructuredNestedMap(obj.Object, "spec", "source", "catalog") - gomega.Expect(catalogSpec).NotTo(gomega.BeNil()) - catalogSpec["version"] = upgradeVersion - catalogSpec["upgradeConstraintPolicy"] = "SelfCertified" - err = unstructuredSetNestedMap(obj.Object, catalogSpec, "spec", "source", "catalog") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, err = dynamicClient.Resource(clusterExtensionGVR).Update(ctx, obj, metav1.UpdateOptions{}) + patch := []byte(fmt.Sprintf(`{"spec":{"source":{"catalog":{"version":"%s","upgradeConstraintPolicy":"SelfCertified"}}}}`, upgradeVersion)) + _, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, clusterExtensionName, types.MergePatchType, patch, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Printf("Patched ClusterExtension version to %s", upgradeVersion) @@ -228,37 +223,3 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol }) }) -func unstructuredNestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, 
error) { - var current interface{} = obj - for _, field := range fields { - m, ok := current.(map[string]interface{}) - if !ok { - return nil, false, fmt.Errorf("expected map at field %s", field) - } - current, ok = m[field] - if !ok { - return nil, false, nil - } - } - result, ok := current.(map[string]interface{}) - if !ok { - return nil, false, fmt.Errorf("final value is not a map") - } - return result, true, nil -} - -func unstructuredSetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { - if len(fields) == 0 { - return fmt.Errorf("no fields specified") - } - current := obj - for _, field := range fields[:len(fields)-1] { - next, ok := current[field].(map[string]interface{}) - if !ok { - return fmt.Errorf("expected map at field %s", field) - } - current = next - } - current[fields[len(fields)-1]] = value - return nil -} diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index f52d621bbf6..a7aa4e80c4d 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -24,6 +24,7 @@ import ( "context" "fmt" "log" + "strings" "time" "github.com/onsi/ginkgo/v2" @@ -213,10 +214,13 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. } } - ginkgo.By("Deleting OLMv0-managed cluster-scoped resources") + ginkgo.By("Deleting OLMv0-managed cluster-scoped resources related to OADP") crs, _ := kubeClient.RbacV1().ClusterRoles().List(ctx, olmSelector) if crs != nil { for _, cr := range crs.Items { + if !isOADPRelatedResource(cr.Name, namespace) { + continue + } log.Printf("Deleting remnant ClusterRole %s", cr.Name) _ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) } @@ -224,6 +228,9 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
crbs, _ := kubeClient.RbacV1().ClusterRoleBindings().List(ctx, olmSelector) if crbs != nil { for _, crb := range crbs.Items { + if !isOADPRelatedResource(crb.Name, namespace) { + continue + } log.Printf("Deleting remnant ClusterRoleBinding %s", crb.Name) _ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}) } @@ -372,3 +379,9 @@ func isDefaultCatalogSource(name string) bool { } return false } + +func isOADPRelatedResource(name, ns string) bool { + return strings.Contains(name, "oadp") || + strings.Contains(name, "velero") || + strings.Contains(name, ns) +} diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index f6b97ff109f..cfa42d904df 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -316,7 +316,13 @@ func ensureClusterCatalog(ctx context.Context, name, image string) { } _, err := dynamicClient.Resource(clusterCatalogGVR).Create(ctx, cc, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { - log.Printf("ClusterCatalog %s already exists", name) + existing, getErr := dynamicClient.Resource(clusterCatalogGVR).Get(ctx, name, metav1.GetOptions{}) + if getErr == nil { + existingImage, _, _ := unstructured.NestedString(existing.Object, "spec", "source", "image", "ref") + log.Printf("ClusterCatalog %s already exists with image %s (expected %s)", name, existingImage, image) + gomega.Expect(existingImage).To(gomega.Equal(image), + "Existing ClusterCatalog %s has image %s but expected %s — delete it first or use matching image", name, existingImage, image) + } return } gomega.Expect(err).NotTo(gomega.HaveOccurred()) From da78e23c7069cdf3ff1e211471a6e1c9ba8d3c95 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:16:56 -0400 Subject: [PATCH 10/16] fix: replace xargs -r with portable POSIX alternative in Makefile xargs -r (--no-run-if-empty) is a GNU coreutils extension not available on macOS BSD xargs. Use shell variable + conditional instead. 
Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index f7e5adc556e..57bd4567756 100644 --- a/Makefile +++ b/Makefile @@ -1250,16 +1250,16 @@ upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to -$(OC_CLI) delete catalogsource --all -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true @echo "=== Phase 3: Removing orphaned OADP/Velero CRDs ===" # OLMv1 cannot adopt CRDs it did not create - -$(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io' | \ - xargs -r $(OC_CLI) delete --ignore-not-found=true || true + -CRDS=$$($(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io'); \ + if [ -n "$$CRDS" ]; then echo "$$CRDS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true @echo "=== Phase 3b: Removing OLMv0-managed remnant resources ===" # OLMv1 cannot adopt resources created by OLMv0's CSV (labeled olm.managed=true) -$(OC_CLI) delete sa,roles,rolebindings,deployments -l olm.managed=true -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true # Only delete cluster-scoped resources related to OADP (avoid breaking other operators in shared clusters) - -$(OC_CLI) get clusterroles -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)' | \ - xargs -r $(OC_CLI) delete --ignore-not-found=true || true - -$(OC_CLI) get clusterrolebindings -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)' | \ - xargs -r $(OC_CLI) delete --ignore-not-found=true || true + -CRS=$$($(OC_CLI) get clusterroles -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)'); \ + if [ -n "$$CRS" ]; then echo "$$CRS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true + -CRBS=$$($(OC_CLI) get clusterrolebindings -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)'); \ + if [ -n "$$CRBS" ]; then echo "$$CRBS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true @echo "=== Phase 4: Creating ClusterCatalog ===" @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ From c8ccff97afcffd1608776f7931fce876e8a3cb18 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:17:32 -0400 Subject: [PATCH 11/16] fix: handle List errors in Eventually closures to prevent nil deref The Eventually closures for Subscription and CSV deletion verification ignored List errors. If List fails (transient API error), the nil list causes a panic on list.Items access. Use gomega.Gomega parameter to properly assert both error and emptiness. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_migrate_test.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index a7aa4e80c4d..52477230613 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -119,10 +119,11 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - gomega.Eventually(func() int { - list, _ := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) - return len(list.Items) - }, 1*time.Minute, 5*time.Second).Should(gomega.Equal(0)) + gomega.Eventually(func(g gomega.Gomega) { + list, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(list.Items).To(gomega.BeEmpty()) + }, 1*time.Minute, 5*time.Second).Should(gomega.Succeed()) }) ginkgo.It("should remove OLMv0 CSVs", func() { @@ -138,10 +139,11 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. } } - gomega.Eventually(func() int { - list, _ := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) - return len(list.Items) - }, 2*time.Minute, 5*time.Second).Should(gomega.Equal(0)) + gomega.Eventually(func(g gomega.Gomega) { + list, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(list.Items).To(gomega.BeEmpty()) + }, 2*time.Minute, 5*time.Second).Should(gomega.Succeed()) }) ginkgo.It("should remove OLMv0 OperatorGroup and CatalogSource", func() { From 87a3e6e2575a4b2b16bc2adaee89079e01d13043 Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:18:34 -0400 Subject: [PATCH 12/16] fix: log List errors in OLMv0 remnant cleanup instead of discarding Cleanup List calls for ServiceAccounts, Roles, RoleBindings, Deployments, ClusterRoles, and ClusterRoleBindings were silently discarding errors. Now log warnings on failure and skip iteration to prevent nil panics. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_migrate_test.go | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index 52477230613..690039b66c7 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -187,29 +187,33 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
olmSelector := metav1.ListOptions{LabelSelector: "olm.managed=true"} ginkgo.By("Deleting OLMv0-managed namespace-scoped resources") - sas, _ := kubeClient.CoreV1().ServiceAccounts(namespace).List(ctx, olmSelector) - if sas != nil { + if sas, err := kubeClient.CoreV1().ServiceAccounts(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ServiceAccounts: %v", err) + } else { for _, sa := range sas.Items { log.Printf("Deleting remnant ServiceAccount %s/%s", namespace, sa.Name) _ = kubeClient.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}) } } - roles, _ := kubeClient.RbacV1().Roles(namespace).List(ctx, olmSelector) - if roles != nil { + if roles, err := kubeClient.RbacV1().Roles(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list Roles: %v", err) + } else { for _, r := range roles.Items { log.Printf("Deleting remnant Role %s/%s", namespace, r.Name) _ = kubeClient.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}) } } - rbs, _ := kubeClient.RbacV1().RoleBindings(namespace).List(ctx, olmSelector) - if rbs != nil { + if rbs, err := kubeClient.RbacV1().RoleBindings(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list RoleBindings: %v", err) + } else { for _, rb := range rbs.Items { log.Printf("Deleting remnant RoleBinding %s/%s", namespace, rb.Name) _ = kubeClient.RbacV1().RoleBindings(namespace).Delete(ctx, rb.Name, metav1.DeleteOptions{}) } } - deploys, _ := kubeClient.AppsV1().Deployments(namespace).List(ctx, olmSelector) - if deploys != nil { + if deploys, err := kubeClient.AppsV1().Deployments(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list Deployments: %v", err) + } else { for _, d := range deploys.Items { log.Printf("Deleting remnant Deployment %s/%s", namespace, d.Name) _ = kubeClient.AppsV1().Deployments(namespace).Delete(ctx, d.Name, metav1.DeleteOptions{}) @@ -217,8 +221,9 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. } ginkgo.By("Deleting OLMv0-managed cluster-scoped resources related to OADP") - crs, _ := kubeClient.RbacV1().ClusterRoles().List(ctx, olmSelector) - if crs != nil { + if crs, err := kubeClient.RbacV1().ClusterRoles().List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ClusterRoles: %v", err) + } else { for _, cr := range crs.Items { if !isOADPRelatedResource(cr.Name, namespace) { continue @@ -227,8 +232,9 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. _ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) } } - crbs, _ := kubeClient.RbacV1().ClusterRoleBindings().List(ctx, olmSelector) - if crbs != nil { + if crbs, err := kubeClient.RbacV1().ClusterRoleBindings().List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ClusterRoleBindings: %v", err) + } else { for _, crb := range crbs.Items { if !isOADPRelatedResource(crb.Name, namespace) { continue From 4cab1a0a8fd198f6bbc650d8c102b8cdde45feeb Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:19:03 -0400 Subject: [PATCH 13/16] fix: set createdCatalog flag when existing ClusterCatalog matches image When ensureClusterCatalog finds an AlreadyExists catalog with a matching image, it returned without setting createdCatalog=true. This caused AfterAll to skip cleanup, leaving the catalog on the cluster after repeated test runs. 
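For illustration, the intended semantics are roughly the following standalone sketch (toy in-memory state, helper names, and the fake image ref are all illustrative, not the suite's real client code): whether the catalog is created fresh or adopted because it already exists with the expected image, the caller records that AfterAll must delete it.

    package main

    import (
        "errors"
        "fmt"
    )

    var errAlreadyExists = errors.New("already exists")

    // toy cluster state: catalog name -> image ref
    var catalogs = map[string]string{}

    func create(name, image string) error {
        if _, ok := catalogs[name]; ok {
            return errAlreadyExists
        }
        catalogs[name] = image
        return nil
    }

    // ensureCatalog reports true whenever the test is responsible for cleanup:
    // both after a fresh create and after adopting a matching existing catalog.
    func ensureCatalog(name, image string) (bool, error) {
        if err := create(name, image); err != nil {
            if !errors.Is(err, errAlreadyExists) {
                return false, err
            }
            if got := catalogs[name]; got != image {
                return false, fmt.Errorf("catalog %s serves %s, want %s", name, got, image)
            }
        }
        return true, nil
    }

    func main() {
        for i := 0; i < 2; i++ {
            cleanup, err := ensureCatalog("oadp-olmv1-test-catalog", "example.test/oadp-catalog:tag")
            fmt.Println(cleanup, err) // true <nil> on the first and on repeated runs
        }
    }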
Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_suite_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index cfa42d904df..bfbc16dee61 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -323,6 +323,7 @@ func ensureClusterCatalog(ctx context.Context, name, image string) { gomega.Expect(existingImage).To(gomega.Equal(image), "Existing ClusterCatalog %s has image %s but expected %s — delete it first or use matching image", name, existingImage, image) } + createdCatalog = true return } gomega.Expect(err).NotTo(gomega.HaveOccurred()) From 929711e20597de8eac547714ef19d82a3e1aa74a Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:19:37 -0400 Subject: [PATCH 14/16] fix: wait for CRD deletion to complete before proceeding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CRD deletion in Kubernetes is asynchronous — finalizers run and existing CRs get garbage collected. If the install test creates a ClusterExtension before old CRDs are fully gone, OLMv1 may hit ownership conflicts. Now polls until each deleted CRD is confirmed gone. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_suite_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index bfbc16dee61..6a9e85941c8 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -280,19 +280,27 @@ func cleanupOrphanedCRDs(ctx context.Context) { log.Printf("Warning: failed to list CRDs: %v", err) return } - var deleted int + var deletedNames []string for _, crd := range crdList.Items { name := crd.GetName() if strings.HasSuffix(name, ".oadp.openshift.io") || strings.HasSuffix(name, ".velero.io") { if err := dynamicClient.Resource(crdGVR).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { log.Printf("Warning: failed to delete CRD %s: %v", name, err) } else { - deleted++ + deletedNames = append(deletedNames, name) } } } - if deleted > 0 { - log.Printf("Deleted %d orphaned OADP/Velero CRDs", deleted) + if len(deletedNames) > 0 { + log.Printf("Deleted %d orphaned OADP/Velero CRDs, waiting for removal", len(deletedNames)) + for _, name := range deletedNames { + gomega.Eventually(func() bool { + _, err := dynamicClient.Resource(crdGVR).Get(ctx, name, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue(), + "CRD %s should be fully removed", name) + } + log.Print("All orphaned CRDs fully removed") } } From 8d1fb1759fb9e088e45b2151b88ffeb8dc564a0d Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:20:33 -0400 Subject: [PATCH 15/16] fix: delete existing ClusterExtension before Create for rerun safety Both install and migration tests failed with AlreadyExists when a previous run left a ClusterExtension behind. Now delete any existing CE before creating. Also remove hardcoded clusterExtensionName const in favor of the configurable packageName flag, aligning CE name with test-olmv1-cleanup target which deletes $(OLMV1_PACKAGE). 
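For reference, a fully race-free variant of this delete-before-create pattern also waits for the old object to be confirmed gone before creating, since deletion can be delayed by finalizers. The sketch below uses the same client-go dynamic and apimachinery APIs as the suite, but the helper itself (recreate) is illustrative and not part of this series, which issues the delete and then creates directly.

    package olmv1sketch

    import (
        "context"
        "time"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/dynamic"
    )

    // recreate deletes any leftover cluster-scoped object (e.g. a ClusterExtension
    // from a previous run), waits until it is confirmed gone, then creates the new one.
    func recreate(ctx context.Context, cli dynamic.Interface, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) error {
        name := obj.GetName()
        if err := cli.Resource(gvr).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
            return err
        }
        if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true,
            func(ctx context.Context) (bool, error) {
                _, getErr := cli.Resource(gvr).Get(ctx, name, metav1.GetOptions{})
                return apierrors.IsNotFound(getErr), nil
            }); err != nil {
            return err
        }
        _, err := cli.Resource(gvr).Create(ctx, obj, metav1.CreateOptions{})
        return err
    }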
Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_install_test.go | 25 +++++++++++++------------ tests/olmv1/olmv1_migrate_test.go | 3 +++ 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/tests/olmv1/olmv1_install_test.go b/tests/olmv1/olmv1_install_test.go index 8fe780dfb59..f9b33a076d5 100644 --- a/tests/olmv1/olmv1_install_test.go +++ b/tests/olmv1/olmv1_install_test.go @@ -15,8 +15,6 @@ import ( ) const ( - clusterExtensionName = "oadp-operator" - oadpCRDName = "dataprotectionapplications.oadp.openshift.io" veleroCRDName = "backups.velero.io" restoreCRDName = "restores.velero.io" @@ -45,13 +43,13 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol ginkgo.AfterAll(func() { ginkgo.By("Cleaning up OLMv1 test resources") - err := deleteClusterExtension(ctx, clusterExtensionName) + err := deleteClusterExtension(ctx, packageName) if err != nil { log.Printf("Warning: failed to delete ClusterExtension: %v", err) } gomega.Eventually(func() bool { - _, err := getClusterExtension(ctx, clusterExtensionName) + _, err := getClusterExtension(ctx, packageName) return apierrors.IsNotFound(err) }, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted") @@ -64,11 +62,14 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol }) ginkgo.It("should install OADP operator via ClusterExtension", func() { + ginkgo.By("Cleaning up any existing ClusterExtension from previous runs") + _ = deleteClusterExtension(ctx, packageName) + ginkgo.By("Creating the ClusterExtension") - ce := buildClusterExtension(clusterExtensionName, packageName, namespace, serviceAccountName) + ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", clusterExtensionName, packageName, namespace) + log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", packageName, packageName, namespace) ginkgo.By("Waiting for ClusterExtension to be installed") terminalReasons := map[string]bool{ @@ -76,7 +77,7 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol "Failed": true, } gomega.Eventually(func(g gomega.Gomega) { - obj, err := getClusterExtension(ctx, clusterExtensionName) + obj, err := getClusterExtension(ctx, packageName) g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist") logAllConditions(obj) @@ -96,7 +97,7 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) ginkgo.By("Checking installed bundle info") - obj, err := getClusterExtension(ctx, clusterExtensionName) + obj, err := getClusterExtension(ctx, packageName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) bundleName, bundleVersion, found := getInstalledBundle(obj) gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status") @@ -143,7 +144,7 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol }) ginkgo.It("should not report deprecation warnings", func() { - obj, err := getClusterExtension(ctx, clusterExtensionName) + obj, err := getClusterExtension(ctx, packageName) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} { @@ -165,20 +166,20 @@ var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("ol ginkgo.It("should upgrade the ClusterExtension to the target version", func() { ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion)) - obj, err := getClusterExtension(ctx, clusterExtensionName) + obj, err := getClusterExtension(ctx, packageName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) previousBundleName, previousVersion, _ := getInstalledBundle(obj) log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) patch := []byte(fmt.Sprintf(`{"spec":{"source":{"catalog":{"version":"%s","upgradeConstraintPolicy":"SelfCertified"}}}}`, upgradeVersion)) - _, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, clusterExtensionName, types.MergePatchType, patch, metav1.PatchOptions{}) + _, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, packageName, types.MergePatchType, patch, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Printf("Patched ClusterExtension version to %s", upgradeVersion) ginkgo.By("Waiting for upgrade to complete") gomega.Eventually(func() string { - updated, err := getClusterExtension(ctx, clusterExtensionName) + updated, err := getClusterExtension(ctx, packageName) if err != nil { return "" } diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index 690039b66c7..e28dae3b2bb 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -260,6 +260,9 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. ensureServiceAccount(ctx, serviceAccountName, namespace) ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + ginkgo.By("Cleaning up any existing ClusterExtension from previous runs") + _ = deleteClusterExtension(ctx, packageName) + ginkgo.By("Creating the ClusterExtension") ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) From 3cc37339ba52d0d2808acb8d8120446110c678de Mon Sep 17 00:00:00 2001 From: Tiger Kaovilai Date: Mon, 11 May 2026 17:21:21 -0400 Subject: [PATCH 16/16] fix: pin catalog selector in migration path when catalog is auto-detected buildClusterExtension only set the catalog selector when the global catalogImage flag was non-empty. In migration, the catalog image is auto-detected from the Subscription (migratedCatalogImage) while catalogImage stays empty, so the selector was never set and OLMv1 could pick a wrong default/community catalog. Add withCatalogSelector option function so the migration test can explicitly pin to the created ClusterCatalog. Generated with [Claude Code](https://claude.ai/code) via [Happy](https://happy.engineering) Co-Authored-By: Claude Co-Authored-By: Happy Signed-off-by: Tiger Kaovilai --- tests/olmv1/olmv1_migrate_test.go | 6 +++++- tests/olmv1/olmv1_suite_test.go | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go index e28dae3b2bb..a8dad7908ca 100644 --- a/tests/olmv1/olmv1_migrate_test.go +++ b/tests/olmv1/olmv1_migrate_test.go @@ -264,7 +264,11 @@ var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo. 
_ = deleteClusterExtension(ctx, packageName) ginkgo.By("Creating the ClusterExtension") - ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) + var ceOpts []func(map[string]interface{}) + if migratedCatalogImage != "" { + ceOpts = append(ceOpts, withCatalogSelector(catalogName)) + } + ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName, ceOpts...) _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go index 6a9e85941c8..e7888e13e88 100644 --- a/tests/olmv1/olmv1_suite_test.go +++ b/tests/olmv1/olmv1_suite_test.go @@ -130,7 +130,7 @@ func ensureClusterAdminBinding(ctx context.Context, saName, ns string) { log.Printf("Created ClusterRoleBinding %s", bindingName) } -func buildClusterExtension(name, pkg, ns, sa string) *unstructured.Unstructured { +func buildClusterExtension(name, pkg, ns, sa string, opts ...func(map[string]interface{})) *unstructured.Unstructured { spec := map[string]interface{}{ "namespace": ns, "serviceAccount": map[string]interface{}{ @@ -178,10 +178,23 @@ func buildClusterExtension(name, pkg, ns, sa string) *unstructured.Unstructured if version != "" { catalogSpec["version"] = version } + for _, opt := range opts { + opt(catalogSpec) + } return ce } +func withCatalogSelector(catalog string) func(map[string]interface{}) { + return func(catalogSpec map[string]interface{}) { + catalogSpec["selector"] = map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "olm.operatorframework.io/metadata.name": catalog, + }, + } + } +} + func getClusterExtension(ctx context.Context, name string) (*unstructured.Unstructured, error) { return dynamicClient.Resource(clusterExtensionGVR).Get(ctx, name, metav1.GetOptions{}) }
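A closing note on the catalog-pinning option added in [PATCH 16/16]: because withCatalogSelector mutates only the spec.source.catalog map, callers opt in per test and buildClusterExtension needs no selector-specific logic. The standalone sketch below shows the mechanism and the resulting catalog spec shape; it is illustrative only, with just the label key, package name, and catalog name taken from the series.

    package main

    import "fmt"

    // withSelector mirrors the withCatalogSelector option above: it mutates the
    // catalog spec map in place to pin resolution to one ClusterCatalog.
    func withSelector(catalog string) func(map[string]interface{}) {
        return func(catalogSpec map[string]interface{}) {
            catalogSpec["selector"] = map[string]interface{}{
                "matchLabels": map[string]interface{}{
                    "olm.operatorframework.io/metadata.name": catalog,
                },
            }
        }
    }

    func main() {
        catalogSpec := map[string]interface{}{"packageName": "oadp-operator"}
        opts := []func(map[string]interface{}){withSelector("oadp-olmv1-test-catalog")}
        for _, opt := range opts {
            opt(catalogSpec)
        }
        // Prints the map with selector.matchLabels pinning resolution to the named catalog.
        fmt.Printf("%#v\n", catalogSpec)
    }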