diff --git a/.gitignore b/.gitignore index 05921057c15..eb346d683de 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,8 @@ go.work *~ # OADP +# Generated by `make generate-olmv1-manifest` — regenerated each run with current config +oadp-olmv1-manifest.yaml tests/e2e/e2e.test tests/e2e/templates/*.yaml .DS_Store diff --git a/.golangci.yaml b/.golangci.yaml index d8ad4cb082f..f9f13476bd2 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -77,6 +77,7 @@ linters: - third_party$ - builtin$ - examples$ + - tests/olmv1 issues: max-issues-per-linter: 0 max-same-issues: 0 diff --git a/Makefile b/Makefile index 9d10c0d358b..57bd4567756 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,7 @@ vet: check-go ## Run go vet against code. .PHONY: test test: check-go vet envtest ## Run unit tests; run Go linters checks; check if api and bundle folders are up to date; and check if go dependencies are valid @make versions - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e) -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e | grep -v /tests/olmv1) -coverprofile cover.out @make lint @make api-isupdated @make bundle-isupdated @@ -999,7 +999,25 @@ test-e2e: test-e2e-setup install-ginkgo ## Run E2E tests against OADP operator i fi; \ ./tests/e2e/scripts/analyze_failures.sh $${EXIT_CODE:-0}; \ fi; \ - exit $${EXIT_CODE:-0} + echo $${EXIT_CODE:-0} > /tmp/oadp-e2e-exit-code + # OLMv0→OLMv1 migration test runs after e2e. It removes the OLMv0 install + # (Subscription, CSV, CRDs) and reinstalls via OLMv1 ClusterExtension. + # + # CI: Prow's optional-operators-subscribe already created the OLMv0 install. + # The test auto-detects the CatalogSource image from the Subscription. + # Local: Requires OLMv0 install first (make deploy-olm). The test auto-detects + # the CatalogSource, but if you used a custom catalog you may need to set + # OLMV1_CATALOG_IMAGE. If this step fails locally, check: + # - OCP 4.20+ is required (OLMv1 APIs must exist) + # - ttl.sh catalog images expire after TTL_DURATION (default 1h) + # - Run "make test-upgrade-v0-to-olmv1" standalone to iterate on failures + # + # Migration failure does not mask the e2e exit code. Migration results are + # captured separately in junit_olmv1_report.xml. + -$(MAKE) test-upgrade-v0-to-olmv1 + @E2E_EXIT=$$(cat /tmp/oadp-e2e-exit-code 2>/dev/null || echo 0); \ + rm -f /tmp/oadp-e2e-exit-code; \ + exit $$E2E_EXIT .PHONY: test-e2e-cleanup test-e2e-cleanup: login-required @@ -1017,6 +1035,298 @@ test-e2e-cleanup: login-required $(OC_CLI) delete ns mysql-persistent --ignore-not-found=true rm -rf $(SETTINGS_TMP) +##@ OLMv1 Tests +# +# OLMv1 migration and lifecycle tests validate installing OADP via OLMv1 +# ClusterExtension, including migrating from an existing OLMv0 (Subscription/CSV) +# install. Two complementary approaches exist: +# +# Makefile targets (upgrade-v0-to-olmv1): +# Shell-based migration using oc commands. Useful for local development and +# quick iteration. Builds fresh operator/bundle/catalog images, removes OLMv0 +# resources, creates a ClusterCatalog, and installs via ClusterExtension. +# +# Go tests (test-upgrade-v0-to-olmv1): +# Ginkgo test suite that performs the same migration with detailed assertions, +# version verification, and structured JUnit output for CI reporting. 
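+#
+# Rough OLMv0→OLMv1 resource mapping both approaches follow (for orientation; the
+# OLMv1 objects named here are the ones these targets and tests create):
+#   CatalogSource (index image)   -> ClusterCatalog (same FBC image, cluster-scoped)
+#   OperatorGroup + Subscription  -> ClusterExtension (namespace + serviceAccount + package)
+#   CSV-managed RBAC/Deployment   -> applied by OLMv1 from the bundle; OLMv0 leftovers
+#                                    labeled olm.managed=true must be removed first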
+# +# Local usage: +# make deploy-olm # Install OADP via OLMv0 first +# make upgrade-v0-to-olmv1 # Shell-based migration (builds fresh images) +# # or +# make test-upgrade-v0-to-olmv1 # Go test migration (needs catalog-image or +# # pre-existing CatalogSource to auto-detect) +# +# CI/Prow usage (presubmit with optional-operators-subscribe workflow): +# 1. ci-operator builds the test image from build/ci-Dockerfile +# 2. optional-operators-subscribe creates a CatalogSource (from OO_INDEX) + +# OperatorGroup + Subscription in OO_INSTALL_NAMESPACE (openshift-adp), +# giving us a running OLMv0 install +# 3. The test step runs: make test-upgrade-v0-to-olmv1 +# 4. The Go test auto-detects the CatalogSource image from the Subscription, +# removes all OLMv0 resources, creates a ClusterCatalog from the same image, +# and installs via ClusterExtension +# 5. Version is verified: OLMv0 CSV version must match OLMv1 installed version +# +# The Go test is compatible with any CatalogSource placement (openshift-marketplace +# or operator namespace) — it reads spec.sourceNamespace from the Subscription. + +OLMV1_PACKAGE ?= oadp-operator +OLMV1_NAMESPACE ?= $(OADP_TEST_NAMESPACE) +OLMV1_CHANNEL ?= +OLMV1_VERSION ?= +OLMV1_UPGRADE_VERSION ?= +OLMV1_CATALOG ?= oadp-olmv1-test-catalog +OLMV1_CATALOG_IMAGE ?= +OLMV1_SERVICE_ACCOUNT ?= $(OLMV1_PACKAGE)-installer +OLMV1_INSTALLER_BINDING ?= $(OLMV1_SERVICE_ACCOUNT)-binding +OLMV1_FAIL_FAST ?= true + +OLMV1_GINKGO_FLAGS = --vv \ + --no-color=$(OPENSHIFT_CI) \ + --label-filter="olmv1" \ + --junit-report="$(ARTIFACT_DIR)/junit_olmv1_report.xml" \ + --fail-fast=$(OLMV1_FAIL_FAST) \ + --timeout=30m + +.PHONY: test-olmv1 +test-olmv1: login-required install-ginkgo ## Run OLMv1 lifecycle tests (install, verify, upgrade, cleanup) against a cluster with OLMv1 enabled. + ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) $(GINKGO_ARGS) tests/olmv1/ -- \ + -namespace=$(OLMV1_NAMESPACE) \ + -package=$(OLMV1_PACKAGE) \ + -channel=$(OLMV1_CHANNEL) \ + -version=$(OLMV1_VERSION) \ + -upgrade-version=$(OLMV1_UPGRADE_VERSION) \ + -catalog=$(OLMV1_CATALOG) \ + -catalog-image=$(OLMV1_CATALOG_IMAGE) \ + -service-account=$(OLMV1_SERVICE_ACCOUNT) \ + -artifact_dir=$(ARTIFACT_DIR) + +.PHONY: test-olmv1-cleanup +test-olmv1-cleanup: login-required ## Cleanup resources created by OLMv1 tests. + $(OC_CLI) delete clusterextension $(OLMV1_PACKAGE) --ignore-not-found=true + $(OC_CLI) delete clustercatalog $(OLMV1_CATALOG) --ignore-not-found=true + $(OC_CLI) delete clusterrolebinding $(OLMV1_INSTALLER_BINDING) --ignore-not-found=true + $(OC_CLI) delete sa $(OLMV1_SERVICE_ACCOUNT) -n $(OLMV1_NAMESPACE) --ignore-not-found=true + +# deploy-olmv1-mirror-catalog: Deploy a ClusterCatalog from a productized index +# image for testing when the current OCP version's redhat-operator-index does +# not include redhat-oadp-operator (e.g., 4.22 only ships OLMv1-curated packages). +# +# Usage: +# make deploy-olmv1-mirror-catalog # defaults to v4.21 +# make deploy-olmv1-mirror-catalog OLMV1_MIRROR_INDEX=registry.redhat.io/redhat/redhat-operator-index:v4.20 +# make deploy-olmv1-mirror-catalog OLMV1_MIRROR_PACKAGE=redhat-oadp-operator +OLMV1_MIRROR_INDEX ?= registry.redhat.io/redhat/redhat-operator-index:v4.21 +OLMV1_MIRROR_CATALOG ?= oadp-v0-mirror-test-catalog +OLMV1_MIRROR_PACKAGE ?= redhat-oadp-operator + +.PHONY: deploy-olmv1-mirror-catalog +deploy-olmv1-mirror-catalog: login-required ## Deploy a ClusterCatalog from a productized index image for OLMv1 testing. 
+ @echo "=== Deploying mirror ClusterCatalog from $(OLMV1_MIRROR_INDEX) ===" + @printf '%s\n' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterCatalog' \ + 'metadata:' \ + ' name: $(OLMV1_MIRROR_CATALOG)' \ + 'spec:' \ + ' source:' \ + ' type: Image' \ + ' image:' \ + ' ref: $(OLMV1_MIRROR_INDEX)' \ + | $(OC_CLI) apply -f - + @echo "Waiting for ClusterCatalog $(OLMV1_MIRROR_CATALOG) to be serving..." + $(OC_CLI) wait clustercatalog/$(OLMV1_MIRROR_CATALOG) --for=condition=Serving=True --timeout=300s + @echo "" + @echo "Mirror catalog ready. Install $(OLMV1_MIRROR_PACKAGE) via OLMv1:" + @echo " make test-olmv1 OLMV1_PACKAGE=$(OLMV1_MIRROR_PACKAGE) OLMV1_CATALOG=$(OLMV1_MIRROR_CATALOG) OLMV1_CATALOG_IMAGE=$(OLMV1_MIRROR_INDEX)" + @echo " # or for migration test:" + @echo " make test-upgrade-v0-to-olmv1 OLMV1_PACKAGE=$(OLMV1_MIRROR_PACKAGE) OLMV1_CATALOG=$(OLMV1_MIRROR_CATALOG) OLMV1_CATALOG_IMAGE=$(OLMV1_MIRROR_INDEX)" + +.PHONY: undeploy-olmv1-mirror-catalog +undeploy-olmv1-mirror-catalog: login-required ## Remove the mirror ClusterCatalog. + $(OC_CLI) delete clustercatalog $(OLMV1_MIRROR_CATALOG) --ignore-not-found=true + +OLMV1_MANIFEST ?= oadp-olmv1-manifest.yaml + +.PHONY: generate-olmv1-manifest +generate-olmv1-manifest: ## Generate OLMv1 install manifest (Namespace, SA, CRB, ClusterExtension) per OCPSTRAT-2268 template. + @printf '%s\n' \ + '---' \ + 'apiVersion: v1' \ + 'kind: Namespace' \ + 'metadata:' \ + ' name: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: v1' \ + 'kind: ServiceAccount' \ + 'metadata:' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: rbac.authorization.k8s.io/v1' \ + 'kind: ClusterRoleBinding' \ + 'metadata:' \ + ' name: $(OLMV1_INSTALLER_BINDING)' \ + 'roleRef:' \ + ' apiGroup: rbac.authorization.k8s.io' \ + ' kind: ClusterRole' \ + ' name: cluster-admin' \ + 'subjects:' \ + '- kind: ServiceAccount' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + '---' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterExtension' \ + 'metadata:' \ + ' name: $(OLMV1_PACKAGE)' \ + 'spec:' \ + ' namespace: $(OLMV1_NAMESPACE)' \ + ' serviceAccount:' \ + ' name: $(OLMV1_SERVICE_ACCOUNT)' \ + ' config:' \ + ' configType: Inline' \ + ' inline:' \ + ' watchNamespace: $(OLMV1_NAMESPACE)' \ + ' source:' \ + ' sourceType: Catalog' \ + ' catalog:' \ + ' packageName: $(OLMV1_PACKAGE)' \ + > $(OLMV1_MANIFEST) + @if [ -n "$(OLMV1_PIN_CATALOG)" ]; then \ + printf ' selector:\n' >> $(OLMV1_MANIFEST); \ + printf ' matchLabels:\n' >> $(OLMV1_MANIFEST); \ + printf ' olm.operatorframework.io/metadata.name: %s\n' '$(OLMV1_PIN_CATALOG)' >> $(OLMV1_MANIFEST); \ + fi + @if [ -n "$(OLMV1_CHANNEL)" ]; then \ + printf ' channels:\n' >> $(OLMV1_MANIFEST); \ + printf ' - %s\n' '$(OLMV1_CHANNEL)' >> $(OLMV1_MANIFEST); \ + fi + @if [ -n "$(OLMV1_VERSION)" ]; then \ + printf ' version: "%s"\n' '$(OLMV1_VERSION)' >> $(OLMV1_MANIFEST); \ + fi + @echo "Generated $(OLMV1_MANIFEST)" + +# upgrade-v0-to-olmv1: Shell-based OLMv0→OLMv1 migration for local development. +# Builds fresh operator+bundle+catalog images (ttl.sh), removes OLMv0 resources, +# creates ClusterCatalog, and installs via ClusterExtension. +# Skip the build by passing OLMV1_CATALOG_IMAGE=. 
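+# For example, to reuse a previously pushed catalog image (placeholder shown) instead of rebuilding:
+#   make upgrade-v0-to-olmv1 OLMV1_CATALOG_IMAGE=ttl.sh/oadp-operator-catalog-<rev>:1h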
+# Usage: make deploy-olm && make upgrade-v0-to-olmv1 +.PHONY: upgrade-v0-to-olmv1 +upgrade-v0-to-olmv1: UPGRADE_OPERATOR_IMAGE?=ttl.sh/oadp-operator-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_BUNDLE_IMAGE?=ttl.sh/oadp-operator-bundle-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_CATALOG_IMAGE?=ttl.sh/oadp-operator-catalog-$(GIT_REV):$(TTL_DURATION) +upgrade-v0-to-olmv1: UPGRADE_TMP:=$(shell mktemp -d)/ +upgrade-v0-to-olmv1: login-required ## Migrate an existing OLMv0 OADP install to OLMv1 (ClusterExtension). Requires OCP 4.20+. + $(OC_CLI) whoami + @echo "=== Phase 1: Building fresh catalog image ===" + @if [ -n "$(OLMV1_CATALOG_IMAGE)" ]; then \ + echo "Using provided catalog image: $(OLMV1_CATALOG_IMAGE)"; \ + echo "$(OLMV1_CATALOG_IMAGE)" > /tmp/oadp-migrate-catalog-image; \ + else \ + echo "Building operator, bundle, and catalog images (avoids expired ttl.sh images)..."; \ + echo " Operator: $(UPGRADE_OPERATOR_IMAGE)"; \ + echo " Bundle: $(UPGRADE_BUNDLE_IMAGE)"; \ + echo " Catalog: $(UPGRADE_CATALOG_IMAGE)"; \ + cp -r . $(UPGRADE_TMP) && cd $(UPGRADE_TMP) && \ + IMG=$(UPGRADE_OPERATOR_IMAGE) BUNDLE_IMG=$(UPGRADE_BUNDLE_IMAGE) BUNDLE_IMGS=$(UPGRADE_BUNDLE_IMAGE) CATALOG_IMG=$(UPGRADE_CATALOG_IMAGE) \ + make docker-build docker-push bundle bundle-build bundle-push catalog-build catalog-push; \ + chmod -R 777 $(UPGRADE_TMP) && rm -rf $(UPGRADE_TMP); \ + echo "$(UPGRADE_CATALOG_IMAGE)" > /tmp/oadp-migrate-catalog-image; \ + fi + @echo "=== Phase 2: Removing OLMv0 resources ===" + -$(OC_CLI) delete subscription oadp-operator -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) get subscription -n $(OADP_TEST_NAMESPACE) -o name 2>/dev/null | \ + xargs -I {} sh -c '$(OC_CLI) get {} -n $(OADP_TEST_NAMESPACE) -o jsonpath='"'"'{.metadata.name}{"\t"}{.spec.source}{"\n"}'"'"' 2>/dev/null' | \ + grep "$(CATALOG_SOURCE_NAME)" | cut -f1 | \ + xargs -I {} $(OC_CLI) delete subscription {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + -$(OC_CLI) delete csv -l operators.coreos.com/oadp-operator.$(OADP_TEST_NAMESPACE) -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) get csv -n $(OADP_TEST_NAMESPACE) -o name 2>/dev/null | grep oadp-operator | \ + xargs -I {} $(OC_CLI) delete {} -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + -$(OC_CLI) delete operatorgroup oadp-operator-group -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true + -$(OC_CLI) delete catalogsource $(CATALOG_SOURCE_NAME) -n $(CATALOG_SOURCE_NAMESPACE) --ignore-not-found=true + # CI (optional-operators-subscribe) may place CatalogSource in the operator namespace + -$(OC_CLI) delete catalogsource --all -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + @echo "=== Phase 3: Removing orphaned OADP/Velero CRDs ===" + # OLMv1 cannot adopt CRDs it did not create + -CRDS=$$($(OC_CLI) get crd -o name 2>/dev/null | grep -E '\.oadp\.openshift\.io|\.velero\.io'); \ + if [ -n "$$CRDS" ]; then echo "$$CRDS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true + @echo "=== Phase 3b: Removing OLMv0-managed remnant resources ===" + # OLMv1 cannot adopt resources created by OLMv0's CSV (labeled olm.managed=true) + -$(OC_CLI) delete sa,roles,rolebindings,deployments -l olm.managed=true -n $(OADP_TEST_NAMESPACE) --ignore-not-found=true || true + # Only delete cluster-scoped resources related to OADP (avoid breaking other operators in shared clusters) + -CRS=$$($(OC_CLI) get clusterroles -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)'); 
\ + if [ -n "$$CRS" ]; then echo "$$CRS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true + -CRBS=$$($(OC_CLI) get clusterrolebindings -l olm.managed=true -o name 2>/dev/null | grep -E 'oadp|velero|$(OADP_TEST_NAMESPACE)'); \ + if [ -n "$$CRBS" ]; then echo "$$CRBS" | xargs $(OC_CLI) delete --ignore-not-found=true; fi || true + @echo "=== Phase 4: Creating ClusterCatalog ===" + @if [ -f /tmp/oadp-migrate-catalog-image ]; then \ + CATALOG_IMG=$$(cat /tmp/oadp-migrate-catalog-image); \ + echo "Creating ClusterCatalog $(OLMV1_CATALOG) from image $$CATALOG_IMG"; \ + printf '%s\n' \ + 'apiVersion: olm.operatorframework.io/v1' \ + 'kind: ClusterCatalog' \ + 'metadata:' \ + ' name: $(OLMV1_CATALOG)' \ + 'spec:' \ + ' source:' \ + ' type: Image' \ + ' image:' \ + " ref: $$CATALOG_IMG" \ + | $(OC_CLI) apply -f -; \ + echo "Waiting for ClusterCatalog to be serving..."; \ + if ! $(OC_CLI) wait clustercatalog/$(OLMV1_CATALOG) --for=condition=Serving=True --timeout=120s; then \ + echo ""; \ + echo "ERROR: ClusterCatalog $(OLMV1_CATALOG) failed to reach Serving state."; \ + echo "Catalog image: $$CATALOG_IMG"; \ + echo "This usually means the catalog image is expired or cannot be pulled."; \ + echo "If using ttl.sh, images expire after TTL_DURATION (default: 1h)."; \ + echo ""; \ + echo "=== ClusterCatalog status ==="; \ + $(OC_CLI) get clustercatalog $(OLMV1_CATALOG) -o yaml 2>/dev/null || true; \ + echo "=== catalogd pod logs (last 30 lines) ==="; \ + $(OC_CLI) logs -n openshift-catalogd -l app.kubernetes.io/name=catalogd --tail=30 2>/dev/null || true; \ + rm -f /tmp/oadp-migrate-catalog-image; \ + exit 1; \ + fi; \ + rm -f /tmp/oadp-migrate-catalog-image; \ + else \ + echo "Skipping — no custom catalog to migrate"; \ + fi + @echo "=== Phase 5: Applying OLMv1 manifest ===" + -$(OC_CLI) delete clusterextension $(OLMV1_PACKAGE) --ignore-not-found=true + $(MAKE) generate-olmv1-manifest OLMV1_PIN_CATALOG=$(OLMV1_CATALOG) + $(OC_CLI) apply -f $(OLMV1_MANIFEST) + @echo "=== Phase 6: Waiting for ClusterExtension Installed=True ===" + $(OC_CLI) wait clusterextension/$(OLMV1_PACKAGE) \ + --for=condition=Installed=True --timeout=600s + @echo "Migration complete." + $(OC_CLI) get clusterextension $(OLMV1_PACKAGE) + +# test-upgrade-v0-to-olmv1: Ginkgo-based OLMv0→OLMv1 migration test with assertions. +# Expects a pre-existing OLMv0 install (Subscription + CSV running). +# +# Local: make deploy-olm && make test-upgrade-v0-to-olmv1 OLMV1_CATALOG_IMAGE= +# CI: Prow's optional-operators-subscribe installs OLMv0 first, then this target +# runs. The test auto-detects the CatalogSource image from the Subscription +# (no OLMV1_CATALOG_IMAGE needed — works with CI-created CatalogSource in any +# namespace). Verifies same version installed before/after migration. +.PHONY: test-upgrade-v0-to-olmv1 +test-upgrade-v0-to-olmv1: login-required install-ginkgo ## Test OLMv0->OLMv1 migration path. Expects a pre-existing OLMv0 OADP install (run make deploy-olm first). 
+ ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) \ + --label-filter="olmv1-migrate" \ + $(GINKGO_ARGS) tests/olmv1/ -- \ + -namespace=$(OLMV1_NAMESPACE) \ + -package=$(OLMV1_PACKAGE) \ + -channel=$(OLMV1_CHANNEL) \ + -version=$(OLMV1_VERSION) \ + -catalog=$(OLMV1_CATALOG) \ + -catalog-image=$(OLMV1_CATALOG_IMAGE) \ + -service-account=$(OLMV1_SERVICE_ACCOUNT) \ + -migrate=true \ + -artifact_dir=$(ARTIFACT_DIR) + .PHONY: update-non-admin-manifests update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest update-non-admin-manifests: yq ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH diff --git a/bundle/manifests/oadp-operator.clusterserviceversion.yaml b/bundle/manifests/oadp-operator.clusterserviceversion.yaml index 44481ede449..81cc9d6c757 100644 --- a/bundle/manifests/oadp-operator.clusterserviceversion.yaml +++ b/bundle/manifests/oadp-operator.clusterserviceversion.yaml @@ -1639,7 +1639,7 @@ spec: installModes: - supported: true type: OwnNamespace - - supported: false + - supported: true type: SingleNamespace - supported: false type: MultiNamespace diff --git a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml index 798be1cc83c..95939eaab1d 100644 --- a/config/manifests/bases/oadp-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/oadp-operator.clusterserviceversion.yaml @@ -467,7 +467,7 @@ spec: installModes: - supported: true type: OwnNamespace - - supported: false + - supported: true type: SingleNamespace - supported: false type: MultiNamespace diff --git a/tests/olmv1/.gitignore b/tests/olmv1/.gitignore new file mode 100644 index 00000000000..3fec32c8427 --- /dev/null +++ b/tests/olmv1/.gitignore @@ -0,0 +1 @@ +tmp/ diff --git a/tests/olmv1/olmv1_install_test.go b/tests/olmv1/olmv1_install_test.go new file mode 100644 index 00000000000..f9b33a076d5 --- /dev/null +++ b/tests/olmv1/olmv1_install_test.go @@ -0,0 +1,226 @@ +package olmv1_test + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + oadpCRDName = "dataprotectionapplications.oadp.openshift.io" + veleroCRDName = "backups.velero.io" + restoreCRDName = "restores.velero.io" + + managerLabelSelector = "control-plane=controller-manager" +) + +var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("olmv1"), func() { + ctx := context.Background() + + ginkgo.BeforeAll(func() { + ginkgo.By("Cleaning up orphaned OADP/Velero CRDs from previous installs") + cleanupOrphanedCRDs(ctx) + + ginkgo.By("Setting up namespace, ServiceAccount, and RBAC") + ensureNamespace(ctx, namespace) + ensureServiceAccount(ctx, serviceAccountName, namespace) + ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + + if catalogImage != "" { + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, catalogImage)) + ensureClusterCatalog(ctx, catalogName, catalogImage) + waitForClusterCatalogServing(ctx, catalogName) + } + }) + + ginkgo.AfterAll(func() { + ginkgo.By("Cleaning up OLMv1 test resources") + err := deleteClusterExtension(ctx, packageName) + if err != nil { + log.Printf("Warning: failed to delete ClusterExtension: %v", err) + } + + gomega.Eventually(func() bool { + _, err := getClusterExtension(ctx, 
packageName) + return apierrors.IsNotFound(err) + }, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted") + + if createdCatalog { + ginkgo.By(fmt.Sprintf("Deleting ClusterCatalog %s", catalogName)) + deleteClusterCatalog(ctx, catalogName) + } + + cleanupClusterRoleBinding(ctx, serviceAccountName) + }) + + ginkgo.It("should install OADP operator via ClusterExtension", func() { + ginkgo.By("Cleaning up any existing ClusterExtension from previous runs") + _ = deleteClusterExtension(ctx, packageName) + + ginkgo.By("Creating the ClusterExtension") + ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName) + _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", packageName, packageName, namespace) + + ginkgo.By("Waiting for ClusterExtension to be installed") + terminalReasons := map[string]bool{ + "InvalidConfiguration": true, + "Failed": true, + } + gomega.Eventually(func(g gomega.Gomega) { + obj, err := getClusterExtension(ctx, packageName) + g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist") + + logAllConditions(obj) + + progCond, progFound := getCondition(obj, "Progressing") + if progFound { + reason, _ := progCond["reason"].(string) + message, _ := progCond["message"].(string) + g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), + "ClusterExtension has terminal error on Progressing: reason=%s message=%s", reason, message) + } + + instCond, instFound := getCondition(obj, "Installed") + g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") + status, _ := instCond["status"].(string) + g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") + }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) + + ginkgo.By("Checking installed bundle info") + obj, err := getClusterExtension(ctx, packageName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bundleName, bundleVersion, found := getInstalledBundle(obj) + gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status") + log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) + }) + + ginkgo.It("should have the OADP controller-manager pod running", func() { + ginkgo.By("Waiting for controller-manager pod to be Running") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + log.Printf("Controller-manager pod %s is Running", pod.Name) + return true, nil + } + log.Printf("Controller-manager pod %s phase: %s", pod.Name, pod.Status.Phase) + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running") + }) + + ginkgo.It("should have OADP CRDs installed", func() { + expectedCRDs := []string{ + oadpCRDName, + veleroCRDName, + restoreCRDName, + "schedules.velero.io", + "backupstoragelocations.velero.io", + "volumesnapshotlocations.velero.io", + } + + for _, crdName := range expectedCRDs { + ginkgo.By(fmt.Sprintf("Checking CRD %s exists", crdName)) + exists, err := crdExists(ctx, crdName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("CRD %s 
should exist", crdName)) + log.Printf("CRD %s exists", crdName) + } + }) + + ginkgo.It("should not report deprecation warnings", func() { + obj, err := getClusterExtension(ctx, packageName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} { + cond, found := getCondition(obj, condType) + if found { + status, _ := cond["status"].(string) + gomega.Expect(status).To(gomega.Equal("False"), + fmt.Sprintf("%s condition should be False, got %s", condType, status)) + } + } + }) + + ginkgo.When("upgrading the operator", func() { + ginkgo.BeforeAll(func() { + if upgradeVersion == "" { + ginkgo.Skip("No --upgrade-version specified, skipping upgrade tests") + } + }) + + ginkgo.It("should upgrade the ClusterExtension to the target version", func() { + ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion)) + obj, err := getClusterExtension(ctx, packageName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + previousBundleName, previousVersion, _ := getInstalledBundle(obj) + log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) + + patch := []byte(fmt.Sprintf(`{"spec":{"source":{"catalog":{"version":"%s","upgradeConstraintPolicy":"SelfCertified"}}}}`, upgradeVersion)) + _, err = dynamicClient.Resource(clusterExtensionGVR).Patch(ctx, packageName, types.MergePatchType, patch, metav1.PatchOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Patched ClusterExtension version to %s", upgradeVersion) + + ginkgo.By("Waiting for upgrade to complete") + gomega.Eventually(func() string { + updated, err := getClusterExtension(ctx, packageName) + if err != nil { + return "" + } + + cond, found := getCondition(updated, "Installed") + if !found { + return "" + } + status, _ := cond["status"].(string) + if status != "True" { + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + log.Printf("Installed condition: status=%s reason=%s message=%s", status, reason, message) + return "" + } + + _, bundleVer, found := getInstalledBundle(updated) + if !found { + return "" + } + log.Printf("Installed bundle version: %s", bundleVer) + return bundleVer + }, 10*time.Minute, 10*time.Second).ShouldNot(gomega.Equal(previousVersion), + "Installed bundle version should change after upgrade") + + ginkgo.By("Verifying controller-manager pod is running after upgrade") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + return true, nil + } + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue()) + }) + }) +}) + diff --git a/tests/olmv1/olmv1_migrate_test.go b/tests/olmv1/olmv1_migrate_test.go new file mode 100644 index 00000000000..a8dad7908ca --- /dev/null +++ b/tests/olmv1/olmv1_migrate_test.go @@ -0,0 +1,402 @@ +// OLMv0→OLMv1 migration test: validates migrating a running OLMv0 OADP install +// (Subscription + CSV) to OLMv1 (ClusterExtension). 
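+// Specs here carry the "olmv1-migrate" Ginkgo label; when run through the Makefile
+// targets they report to $(ARTIFACT_DIR)/junit_olmv1_report.xml via OLMV1_GINKGO_FLAGS.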
+// +// Run locally: +// make deploy-olm # install via OLMv0 +// make test-upgrade-v0-to-olmv1 OLMV1_CATALOG_IMAGE= # run migration test +// +// Run in CI (Prow presubmit with optional-operators-subscribe workflow): +// Prow creates CatalogSource + Subscription in openshift-adp from the CI-built +// index image. This test auto-detects the CatalogSource image from the Subscription's +// spec.source/spec.sourceNamespace, so no OLMV1_CATALOG_IMAGE is needed. +// +// The test: +// 1. Captures OLMv0 CSV version before migration +// 2. Removes Subscriptions, CSVs, OperatorGroup, CatalogSources, orphaned CRDs, +// and olm.managed=true remnant resources +// 3. Creates a ClusterCatalog from the detected (or provided) catalog image +// 4. Installs OADP via OLMv1 ClusterExtension +// 5. Verifies the installed version matches the pre-migration OLMv0 version +// 6. Verifies the bundle came from the expected catalog (not a default/community one) +package olmv1_test + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + subscriptionGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "subscriptions", + } + csvGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "clusterserviceversions", + } + operatorGroupGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1", + Resource: "operatorgroups", + } + catalogSourceGVR = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "catalogsources", + } + + migratedCatalogImage string + migratedCatalogSourceName string + migratedCatalogSourceNS string + olmv0InstalledVersion string +) + +var _ = ginkgo.Describe("OADP OLMv0 to OLMv1 migration", ginkgo.Ordered, ginkgo.Label("olmv1-migrate"), func() { + ctx := context.Background() + + ginkgo.BeforeAll(func() { + if !migrate { + ginkgo.Skip("Migration tests disabled (pass -migrate=true to enable)") + } + + ginkgo.By("Verifying OLMv0 resources exist before migration") + subs, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil || len(subs.Items) == 0 { + ginkgo.Skip(fmt.Sprintf("No OLMv0 Subscription found in %s — run 'make deploy-olm' first", namespace)) + } + for _, sub := range subs.Items { + log.Printf("Found OLMv0 Subscription: %s", sub.GetName()) + } + + ginkgo.By("Capturing OLMv0 installed version from CSV") + csvs, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err == nil { + for _, csv := range csvs.Items { + csvVersion, _, _ := unstructured.NestedString(csv.Object, "spec", "version") + if csvVersion != "" { + olmv0InstalledVersion = csvVersion + log.Printf("OLMv0 installed CSV: %s version: %s", csv.GetName(), csvVersion) + break + } + } + } + + ginkgo.By("Capturing CatalogSource for ClusterCatalog migration") + migratedCatalogImage = catalogImage + if migratedCatalogImage == "" { + migratedCatalogImage, migratedCatalogSourceName, migratedCatalogSourceNS = detectCatalogSource(ctx, subs.Items) + } + if migratedCatalogImage != "" { + log.Printf("Will create ClusterCatalog from CatalogSource image: %s", migratedCatalogImage) + if 
migratedCatalogSourceName != "" { + log.Printf("CatalogSource to clean up: %s/%s", migratedCatalogSourceNS, migratedCatalogSourceName) + } + } else { + log.Print("No custom CatalogSource detected — will rely on default ClusterCatalogs") + } + }) + + ginkgo.It("should remove OLMv0 Subscriptions", func() { + subs, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, sub := range subs.Items { + ginkgo.By(fmt.Sprintf("Deleting Subscription %s", sub.GetName())) + err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).Delete(ctx, sub.GetName(), metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + gomega.Eventually(func(g gomega.Gomega) { + list, err := dynamicClient.Resource(subscriptionGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(list.Items).To(gomega.BeEmpty()) + }, 1*time.Minute, 5*time.Second).Should(gomega.Succeed()) + }) + + ginkgo.It("should remove OLMv0 CSVs", func() { + csvs, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, csv := range csvs.Items { + name := csv.GetName() + ginkgo.By(fmt.Sprintf("Deleting CSV %s", name)) + err := dynamicClient.Resource(csvGVR).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + gomega.Eventually(func(g gomega.Gomega) { + list, err := dynamicClient.Resource(csvGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(list.Items).To(gomega.BeEmpty()) + }, 2*time.Minute, 5*time.Second).Should(gomega.Succeed()) + }) + + ginkgo.It("should remove OLMv0 OperatorGroup and CatalogSource", func() { + ogs, err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).List(ctx, metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, og := range ogs.Items { + ginkgo.By(fmt.Sprintf("Deleting OperatorGroup %s", og.GetName())) + err := dynamicClient.Resource(operatorGroupGVR).Namespace(namespace).Delete(ctx, og.GetName(), metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + } + + ginkgo.By("Deleting CatalogSources used by the operator") + if migratedCatalogSourceName != "" && migratedCatalogSourceNS != "" { + ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s/%s (detected from Subscription)", migratedCatalogSourceNS, migratedCatalogSourceName)) + _ = dynamicClient.Resource(catalogSourceGVR).Namespace(migratedCatalogSourceNS).Delete(ctx, migratedCatalogSourceName, metav1.DeleteOptions{}) + } + for _, csNS := range []string{"openshift-marketplace", namespace} { + csList, err := dynamicClient.Resource(catalogSourceGVR).Namespace(csNS).List(ctx, metav1.ListOptions{}) + if err != nil { + continue + } + for _, cs := range csList.Items { + name := cs.GetName() + if isDefaultCatalogSource(name) { + continue + } + ginkgo.By(fmt.Sprintf("Deleting CatalogSource %s/%s", csNS, name)) + _ = dynamicClient.Resource(catalogSourceGVR).Namespace(csNS).Delete(ctx, name, metav1.DeleteOptions{}) + } + } + }) + + ginkgo.It("should clean orphaned OADP/Velero CRDs", func() { + ginkgo.By("Deleting orphaned CRDs that OLMv1 cannot adopt") + cleanupOrphanedCRDs(ctx) + }) + + ginkgo.It("should clean OLMv0 
remnant resources that OLMv1 cannot adopt", func() { + olmSelector := metav1.ListOptions{LabelSelector: "olm.managed=true"} + + ginkgo.By("Deleting OLMv0-managed namespace-scoped resources") + if sas, err := kubeClient.CoreV1().ServiceAccounts(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ServiceAccounts: %v", err) + } else { + for _, sa := range sas.Items { + log.Printf("Deleting remnant ServiceAccount %s/%s", namespace, sa.Name) + _ = kubeClient.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}) + } + } + if roles, err := kubeClient.RbacV1().Roles(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list Roles: %v", err) + } else { + for _, r := range roles.Items { + log.Printf("Deleting remnant Role %s/%s", namespace, r.Name) + _ = kubeClient.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}) + } + } + if rbs, err := kubeClient.RbacV1().RoleBindings(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list RoleBindings: %v", err) + } else { + for _, rb := range rbs.Items { + log.Printf("Deleting remnant RoleBinding %s/%s", namespace, rb.Name) + _ = kubeClient.RbacV1().RoleBindings(namespace).Delete(ctx, rb.Name, metav1.DeleteOptions{}) + } + } + if deploys, err := kubeClient.AppsV1().Deployments(namespace).List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list Deployments: %v", err) + } else { + for _, d := range deploys.Items { + log.Printf("Deleting remnant Deployment %s/%s", namespace, d.Name) + _ = kubeClient.AppsV1().Deployments(namespace).Delete(ctx, d.Name, metav1.DeleteOptions{}) + } + } + + ginkgo.By("Deleting OLMv0-managed cluster-scoped resources related to OADP") + if crs, err := kubeClient.RbacV1().ClusterRoles().List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ClusterRoles: %v", err) + } else { + for _, cr := range crs.Items { + if !isOADPRelatedResource(cr.Name, namespace) { + continue + } + log.Printf("Deleting remnant ClusterRole %s", cr.Name) + _ = kubeClient.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) + } + } + if crbs, err := kubeClient.RbacV1().ClusterRoleBindings().List(ctx, olmSelector); err != nil { + log.Printf("Warning: failed to list ClusterRoleBindings: %v", err) + } else { + for _, crb := range crbs.Items { + if !isOADPRelatedResource(crb.Name, namespace) { + continue + } + log.Printf("Deleting remnant ClusterRoleBinding %s", crb.Name) + _ = kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}) + } + } + }) + + ginkgo.It("should create ClusterCatalog from migrated CatalogSource image", func() { + if migratedCatalogImage == "" { + ginkgo.Skip("No custom catalog image to migrate — using default ClusterCatalogs") + } + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, migratedCatalogImage)) + ensureClusterCatalog(ctx, catalogName, migratedCatalogImage) + waitForClusterCatalogServing(ctx, catalogName) + }) + + ginkgo.It("should install OADP via OLMv1 ClusterExtension", func() { + ginkgo.By("Setting up installer ServiceAccount and RBAC") + ensureNamespace(ctx, namespace) + ensureServiceAccount(ctx, serviceAccountName, namespace) + ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + + ginkgo.By("Cleaning up any existing ClusterExtension from previous runs") + _ = deleteClusterExtension(ctx, packageName) + + ginkgo.By("Creating the ClusterExtension") + var ceOpts 
[]func(map[string]interface{}) + if migratedCatalogImage != "" { + ceOpts = append(ceOpts, withCatalogSelector(catalogName)) + } + ce := buildClusterExtension(packageName, packageName, namespace, serviceAccountName, ceOpts...) + _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Waiting for ClusterExtension to be installed") + terminalReasons := map[string]bool{ + "InvalidConfiguration": true, + "Failed": true, + } + gomega.Eventually(func(g gomega.Gomega) { + obj, err := getClusterExtension(ctx, packageName) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + log.Print("Current conditions:") + logAllConditions(obj) + + progCond, progFound := getCondition(obj, "Progressing") + if progFound { + reason, _ := progCond["reason"].(string) + message, _ := progCond["message"].(string) + g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), + "ClusterExtension has terminal error: reason=%s message=%s", reason, message) + } + + instCond, instFound := getCondition(obj, "Installed") + g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") + status, _ := instCond["status"].(string) + g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") + }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) + + ginkgo.By("Verifying installed bundle version and catalog source") + obj, err := getClusterExtension(ctx, packageName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bundleName, bundleVersion, found := getInstalledBundle(obj) + gomega.Expect(found).To(gomega.BeTrue(), "ClusterExtension should have an installed bundle") + log.Printf("OLMv1 installed bundle: name=%s version=%s", bundleName, bundleVersion) + + if olmv0InstalledVersion != "" { + log.Printf("Version check: OLMv0=%s OLMv1=%s", olmv0InstalledVersion, bundleVersion) + gomega.Expect(bundleVersion).To(gomega.Equal(olmv0InstalledVersion), + "OLMv1 installed version should match OLMv0 version (was %s, got %s)", olmv0InstalledVersion, bundleVersion) + } + + if migratedCatalogImage != "" { + instCond, instFound := getCondition(obj, "Installed") + gomega.Expect(instFound).To(gomega.BeTrue()) + installedMsg, _ := instCond["message"].(string) + log.Printf("Installed condition message: %s", installedMsg) + gomega.Expect(installedMsg).NotTo(gomega.ContainSubstring("openshift-community-operators"), + "Bundle should NOT come from community catalog — expected custom catalog %s", catalogName) + } + }) + + ginkgo.It("should have controller-manager pod running after migration", func() { + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "control-plane=controller-manager", + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + log.Printf("Controller-manager pod %s is Running", pod.Name) + return true, nil + } + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "controller-manager pod should be Running") + }) + + ginkgo.AfterAll(func() { + if !migrate { + return + } + ginkgo.By("Cleaning up migration test resources") + _ = deleteClusterExtension(ctx, packageName) + + gomega.Eventually(func() bool { + _, err := getClusterExtension(ctx, packageName) + return apierrors.IsNotFound(err) + }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) + + if createdCatalog { + deleteClusterCatalog(ctx, catalogName) + } + 
cleanupClusterRoleBinding(ctx, serviceAccountName) + }) +}) + +// detectCatalogSource finds the custom CatalogSource from the OLMv0 Subscription. +// Returns the image, CatalogSource name, and namespace. The image can be used +// to create a ClusterCatalog (same FBC format, different API). The name and +// namespace are needed for cleanup since CI (optional-operators-subscribe) may +// place the CatalogSource in the operator namespace, not openshift-marketplace. +func detectCatalogSource(ctx context.Context, subs []unstructured.Unstructured) (image, name, ns string) { + for _, sub := range subs { + source, _, _ := unstructured.NestedString(sub.Object, "spec", "source") + sourceNS, _, _ := unstructured.NestedString(sub.Object, "spec", "sourceNamespace") + if source == "" || sourceNS == "" { + continue + } + if isDefaultCatalogSource(source) { + continue + } + cs, err := dynamicClient.Resource(catalogSourceGVR).Namespace(sourceNS).Get(ctx, source, metav1.GetOptions{}) + if err != nil { + log.Printf("Warning: CatalogSource %s/%s not found: %v", sourceNS, source, err) + continue + } + img, _, _ := unstructured.NestedString(cs.Object, "spec", "image") + if img != "" { + log.Printf("Detected CatalogSource %s/%s image: %s", sourceNS, source, img) + return img, source, sourceNS + } + } + return "", "", "" +} + +func isDefaultCatalogSource(name string) bool { + switch name { + case "redhat-operators", "certified-operators", "community-operators", "redhat-marketplace": + return true + } + return false +} + +func isOADPRelatedResource(name, ns string) bool { + return strings.Contains(name, "oadp") || + strings.Contains(name, "velero") || + strings.Contains(name, ns) +} diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go new file mode 100644 index 00000000000..e7888e13e88 --- /dev/null +++ b/tests/olmv1/olmv1_suite_test.go @@ -0,0 +1,402 @@ +package olmv1_test + +import ( + "context" + "flag" + "log" + "strings" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +var ( + namespace string + packageName string + channel string + version string + upgradeVersion string + catalogName string + catalogImage string + serviceAccountName string + artifactDir string + migrate bool + + createdCatalog bool + + kubeClient *kubernetes.Clientset + dynamicClient dynamic.Interface + + clusterExtensionGVR = schema.GroupVersionResource{ + Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clusterextensions", + } + + clusterCatalogGVR = schema.GroupVersionResource{ + Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clustercatalogs", + } +) + +func init() { + flag.StringVar(&namespace, "namespace", "openshift-adp", "Namespace to install the operator into") + flag.StringVar(&packageName, "package", "oadp-operator", "OLM package name for the operator") + flag.StringVar(&channel, "channel", "", "Catalog channel (optional)") + flag.StringVar(&version, "version", "", "Version to install (optional, e.g. 
'1.5.1' or '1.5.x')") + flag.StringVar(&upgradeVersion, "upgrade-version", "", "Version to upgrade to (optional)") + flag.StringVar(&catalogName, "catalog", "oadp-olmv1-test-catalog", "ClusterCatalog name to create or reference") + flag.StringVar(&catalogImage, "catalog-image", "", "Catalog image to use for creating a ClusterCatalog (required when package is not in default catalogs)") + flag.StringVar(&serviceAccountName, "service-account", "oadp-operator-installer", "ServiceAccount name for ClusterExtension") + flag.StringVar(&artifactDir, "artifact_dir", "/tmp", "Directory for test artifacts") + flag.BoolVar(&migrate, "migrate", false, "Run OLMv0-to-OLMv1 migration tests (expects pre-existing OLMv0 install)") +} + +func TestOADPOLMv1(t *testing.T) { + flag.Parse() + gomega.RegisterFailHandler(ginkgo.Fail) + + kubeConfig := config.GetConfigOrDie() + kubeConfig.QPS = 50 + kubeConfig.Burst = 100 + + var err error + kubeClient, err = kubernetes.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + dynamicClient, err = dynamic.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.RunSpecs(t, "OADP OLMv1 Suite") +} + +// --- Helpers --- + +func ensureNamespace(ctx context.Context, name string) { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} + _, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created namespace %s", name) +} + +func ensureServiceAccount(ctx context.Context, name, ns string) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + } + _, err := kubeClient.CoreV1().ServiceAccounts(ns).Create(ctx, sa, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ServiceAccount %s/%s", ns, name) +} + +// ensureClusterAdminBinding grants cluster-admin to the installer SA. +// This is intentionally broad for testing; production should use least-privilege RBAC. +func ensureClusterAdminBinding(ctx context.Context, saName, ns string) { + bindingName := saName + "-binding" + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: bindingName}, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + {Kind: "ServiceAccount", Name: saName, Namespace: ns}, + }, + } + _, err := kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterRoleBinding %s", bindingName) +} + +func buildClusterExtension(name, pkg, ns, sa string, opts ...func(map[string]interface{})) *unstructured.Unstructured { + spec := map[string]interface{}{ + "namespace": ns, + "serviceAccount": map[string]interface{}{ + "name": sa, + }, + "source": map[string]interface{}{ + "sourceType": "Catalog", + "catalog": map[string]interface{}{ + "packageName": pkg, + }, + }, + // OwnNamespace operators require watchNamespace to tell OLMv1 + // which namespace the operator should watch. Set it to the + // install namespace so it mirrors OLMv0 OwnNamespace behavior. 
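+        // As YAML this stanza roughly matches what generate-olmv1-manifest emits:
+        //   config:
+        //     configType: Inline
+        //     inline:
+        //       watchNamespace: <install namespace>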
+ "config": map[string]interface{}{ + "configType": "Inline", + "inline": map[string]interface{}{ + "watchNamespace": ns, + }, + }, + } + + ce := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterExtension", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": spec, + }, + } + + catalogSpec := spec["source"].(map[string]interface{})["catalog"].(map[string]interface{}) + if catalogImage != "" { + catalogSpec["selector"] = map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "olm.operatorframework.io/metadata.name": catalogName, + }, + } + } + if channel != "" { + catalogSpec["channels"] = []interface{}{channel} + } + if version != "" { + catalogSpec["version"] = version + } + for _, opt := range opts { + opt(catalogSpec) + } + + return ce +} + +func withCatalogSelector(catalog string) func(map[string]interface{}) { + return func(catalogSpec map[string]interface{}) { + catalogSpec["selector"] = map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "olm.operatorframework.io/metadata.name": catalog, + }, + } + } +} + +func getClusterExtension(ctx context.Context, name string) (*unstructured.Unstructured, error) { + return dynamicClient.Resource(clusterExtensionGVR).Get(ctx, name, metav1.GetOptions{}) +} + +func deleteClusterExtension(ctx context.Context, name string) error { + err := dynamicClient.Resource(clusterExtensionGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + return err +} + +func getCondition(obj *unstructured.Unstructured, condType string) (map[string]interface{}, bool) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + return nil, false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == condType { + return cond, true + } + } + return nil, false +} + +func logAllConditions(obj *unstructured.Unstructured) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + log.Print(" No conditions present yet") + return + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + condType, _ := cond["type"].(string) + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + if len(message) > 120 { + message = message[:120] + "..." 
+ } + log.Printf(" %s: status=%s reason=%s message=%s", condType, status, reason, message) + } +} + +func getInstalledBundle(obj *unstructured.Unstructured) (name string, ver string, found bool) { + bundleName, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "name") + bundleVersion, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "version") + if bundleName != "" { + return bundleName, bundleVersion, true + } + return "", "", false +} + +func crdExists(ctx context.Context, name string) (bool, error) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + _, err := dynamicClient.Resource(crdGVR).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func cleanupClusterRoleBinding(ctx context.Context, saName string) { + bindingName := saName + "-binding" + err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, bindingName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterRoleBinding %s: %v", bindingName, err) + } +} + +// cleanupOrphanedCRDs deletes any OADP or Velero CRDs left behind by a +// previous OLMv0 deployment or a prior test run. OLMv1 cannot adopt CRDs +// it did not create, so these must be removed before a fresh install. +func cleanupOrphanedCRDs(ctx context.Context) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + crdList, err := dynamicClient.Resource(crdGVR).List(ctx, metav1.ListOptions{}) + if err != nil { + log.Printf("Warning: failed to list CRDs: %v", err) + return + } + var deletedNames []string + for _, crd := range crdList.Items { + name := crd.GetName() + if strings.HasSuffix(name, ".oadp.openshift.io") || strings.HasSuffix(name, ".velero.io") { + if err := dynamicClient.Resource(crdGVR).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete CRD %s: %v", name, err) + } else { + deletedNames = append(deletedNames, name) + } + } + } + if len(deletedNames) > 0 { + log.Printf("Deleted %d orphaned OADP/Velero CRDs, waiting for removal", len(deletedNames)) + for _, name := range deletedNames { + gomega.Eventually(func() bool { + _, err := dynamicClient.Resource(crdGVR).Get(ctx, name, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue(), + "CRD %s should be fully removed", name) + } + log.Print("All orphaned CRDs fully removed") + } +} + +func ensureClusterCatalog(ctx context.Context, name, image string) { + cc := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterCatalog", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": map[string]interface{}{ + "source": map[string]interface{}{ + "type": "Image", + "image": map[string]interface{}{ + "ref": image, + }, + }, + }, + }, + } + _, err := dynamicClient.Resource(clusterCatalogGVR).Create(ctx, cc, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + existing, getErr := dynamicClient.Resource(clusterCatalogGVR).Get(ctx, name, metav1.GetOptions{}) + if getErr == nil { + 
existingImage, _, _ := unstructured.NestedString(existing.Object, "spec", "source", "image", "ref") + log.Printf("ClusterCatalog %s already exists with image %s (expected %s)", name, existingImage, image) + gomega.Expect(existingImage).To(gomega.Equal(image), + "Existing ClusterCatalog %s has image %s but expected %s — delete it first or use matching image", name, existingImage, image) + } + createdCatalog = true + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + createdCatalog = true + log.Printf("Created ClusterCatalog %s with image %s", name, image) +} + +func waitForClusterCatalogServing(ctx context.Context, name string) { + gomega.Eventually(func() bool { + obj, err := dynamicClient.Resource(clusterCatalogGVR).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Printf("Error getting ClusterCatalog %s: %v", name, err) + return false + } + conditions, found, _ := unstructured.NestedSlice(obj.Object, "status", "conditions") + if !found { + return false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + condType, _ := cond["type"].(string) + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + switch condType { + case "Serving": + log.Printf("ClusterCatalog %s Serving: status=%s reason=%s", name, status, reason) + if status != "True" && message != "" { + log.Printf(" message: %s", message) + } + return status == "True" + case "Progressing": + if reason == "Failed" || status == "False" { + imageRef, _, _ := unstructured.NestedString(obj.Object, "spec", "source", "image", "ref") + log.Printf("ClusterCatalog %s Progressing: status=%s reason=%s (image: %s)", name, status, reason, imageRef) + if message != "" { + log.Printf(" message: %s", message) + } + } + } + } + return false + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), + "ClusterCatalog %s should be Serving — if using ttl.sh, the catalog image may have expired", name) +} + +func deleteClusterCatalog(ctx context.Context, name string) { + err := dynamicClient.Resource(clusterCatalogGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterCatalog %s: %v", name, err) + } +}
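+
+// Running the suite directly (a sketch; flag names and defaults come from init()
+// above, the Makefile targets normally supply them, and <catalog-image> is a
+// placeholder for an FBC catalog image that contains the package):
+//
+//   ginkgo run -mod=mod --label-filter="olmv1" tests/olmv1/ -- \
+//     -namespace=openshift-adp -package=oadp-operator \
+//     -catalog=oadp-olmv1-test-catalog -catalog-image=<catalog-image> \
+//     -service-account=oadp-operator-installer -artifact_dir=/tmp
+//
+// For the migration path, switch to --label-filter="olmv1-migrate" and add -migrate=true.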