From bed64018335241b4ca59fe03829295eb170a14af Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 14 Jun 2023 14:28:44 -0700 Subject: [PATCH 01/52] Added changeAnnotation method --- pkg/splunk/enterprise/licensemanager.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 83a1048dd..79f3bad35 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,6 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -222,3 +223,7 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } + +func changeAnnotations(ctx context.Context, c client.Client, meta metav1.ObjectMeta) error { + return nil +} From 7de6f36459a187208acc142d6a5e07452761da99 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 10:28:17 -0700 Subject: [PATCH 02/52] Refined changeClusterManagerAnnotations --- pkg/splunk/enterprise/licensemanager.go | 60 ++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 79f3bad35..b28bbdeac 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -179,6 +179,11 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, if !result.Requeue { result.RequeueAfter = 0 } + + err = changeClusterManagerAnnotations(ctx, 
client, cr) + if err != nil { + return result, err + } return result, nil } @@ -224,6 +229,57 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -func changeAnnotations(ctx context.Context, c client.Client, meta metav1.ObjectMeta) error { +// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { + +// namespacedName := types.NamespacedName{ +// Namespace: cr.GetNamespace(), +// Name: cr.Spec.ClusterManagerRef.Name, +// } +// clusterManagerInstance := &enterpriseApi.ClusterManager{} +// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) +// if err != nil && k8serrors.IsNotFound(err) { +// return false, nil +// } +// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { +// return true, nil +// } + +// return true, err + +// } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. 
+func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + return nil + } From f4453a178703e3196095b9eb6c64836cc47d2c4b Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 15 Jun 2023 15:05:56 -0700 Subject: [PATCH 03/52] test case for upgrade scenario --- kuttl/kuttl-test-helm-upgrade.yaml | 10 + .../upgrade/c3-with-operator/00-assert.yaml | 9 + .../c3-with-operator/00-install-c3.yaml | 6 + .../upgrade/c3-with-operator/01-assert.yaml | 100 +++++++++ .../upgrade/c3-with-operator/02-assert.yaml | 17 ++ .../upgrade/c3-with-operator/03-assert.yaml | 17 ++ .../03-upgrade-splunk-image.yaml | 6 + .../upgrade/c3-with-operator/04-assert.yaml | 196 ++++++++++++++++++ .../upgrade/c3-with-operator/c3_config.yaml | 50 +++++ 9 files changed, 411 insertions(+) create mode 100644 kuttl/kuttl-test-helm-upgrade.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/00-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml create mode 100644 
kuttl/tests/upgrade/c3-with-operator/01-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/c3_config.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml new file mode 100644 index 000000000..d8ecc7336 --- /dev/null +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -0,0 +1,10 @@ +# Entrypoint for helm automation +apiVersion: kuttl.dev/v1beta1 +kind: TestSuite +testDirs: +- ./kuttl/tests/upgrade +parallel: 3 +timeout: 5000 +startKIND: false +artifactsDir: kuttl-artifacts +kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml new file mode 100644 index 000000000..142b71272 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml @@ -0,0 +1,9 @@ +--- +# assert for splunk operator deployment to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml new file mode 100644 index 000000000..d5a3330d1 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml 
b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml new file mode 100644 index 000000000..dce36af8b --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -0,0 +1,100 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
splunk-shc-deployer +status: + replicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml new file mode 100644 index 000000000..84b4ee495 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -0,0 +1,17 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..84b4ee495 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,17 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml new file mode 100644 index 000000000..a11eefac7 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml 
b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml new file mode 100644 index 000000000..9938285c4 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -0,0 +1,196 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
splunk-shc-deployer +status: + replicas: 1 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-cm-cluster-manager-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-deployer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml new file mode 100644 index 000000000..fd00ad06d --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml @@ -0,0 +1,50 @@ +splunk-operator: + enabled: true + splunkOperator: + clusterWideAccess: false + +sva: + c3: + enabled: 
true + + clusterManager: + name: cm + + indexerClusters: + - name: idxc + + searchHeadClusters: + - name: shc + + +indexerCluster: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + + serviceTemplate: + spec: + type: LoadBalancer + +clusterManager: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" + +searchHeadCluster: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" From 32c385daec524b058f55ca16e4cea1dde8d6ea66 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 10:31:37 -0700 Subject: [PATCH 04/52] Modified kuttl cases --- Makefile | 2 +- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../c3-with-operator/00-install-c3.yaml | 2 +- .../upgrade/c3-with-operator/01-assert.yaml | 93 +-------- ...mage.yaml => 01-upgrade-splunk-image.yaml} | 0 .../upgrade/c3-with-operator/02-assert.yaml | 17 -- .../upgrade/c3-with-operator/03-assert.yaml | 17 -- .../upgrade/c3-with-operator/04-assert.yaml | 196 ------------------ pkg/splunk/enterprise/licensemanager.go | 19 -- 9 files changed, 4 insertions(+), 344 deletions(-) rename kuttl/tests/upgrade/c3-with-operator/{03-upgrade-splunk-image.yaml => 01-upgrade-splunk-image.yaml} (100%) delete mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml diff --git a/Makefile b/Makefile index dd59513ae..aef47f310 100644 --- a/Makefile +++ b/Makefile @@ -137,7 +137,7 @@ build: setup/ginkgo manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go -docker-build: test ## Build docker image with the manager. +docker-build: #test ## Build docker image with the manager. 
docker build -t ${IMG} . docker-push: ## Push docker image with the manager. diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index d8ecc7336..a152a8423 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 5000 +timeout: 500 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml index d5a3330d1..a10c31557 100644 --- a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} + - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} --namespace ${NAMESPACE} namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index dce36af8b..4b09ebf54 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,95 +6,4 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: 
StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 + availableReplicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml similarity index 100% rename from kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml rename to kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- 
a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml deleted file mode 100644 index 9938285c4..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ /dev/null @@ -1,196 +0,0 @@ ---- -# assert for splunk operator pod to be ready -apiVersion: apps/v1 -kind: Deployment -metadata: - name: splunk-operator-controller-manager -status: - readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: 
enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-cm-cluster-manager-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: 
splunk-shc-search-head-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-deployer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true \ No newline at end of file diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index b28bbdeac..06ca95316 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -229,25 +229,6 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { - -// namespacedName := types.NamespacedName{ -// Namespace: cr.GetNamespace(), -// Name: cr.Spec.ClusterManagerRef.Name, -// } -// clusterManagerInstance := &enterpriseApi.ClusterManager{} -// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) -// if err != nil && k8serrors.IsNotFound(err) { -// return false, nil -// } -// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { -// return true, nil -// } - -// return true, err - -// } - // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { From 8efb4565cabd2723d1b7400c919bb5c1cf8cdd14 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 10:59:44 -0700 Subject: [PATCH 05/52] Added kuttl tests; Updated LicenseMaster --- env.sh | 8 ++++ kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../01-upgrade-splunk-image.yaml | 2 +- .../upgrade/c3-with-operator/02-assert.yaml | 24 +++++++++++ .../upgrade/c3-with-operator/03-assert.yaml | 40 +++++++++++++++++++ .../upgrade/c3-with-operator/04-assert.yaml | 24 +++++++++++ .../c3-with-operator/05-uninstall-c3.yaml | 5 +++ pkg/splunk/enterprise/licensemaster.go | 37 +++++++++++++++++ 8 files changed, 140 insertions(+), 2 deletions(-) create mode 100755 env.sh create mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml diff --git a/env.sh b/env.sh new file mode 100755 index 000000000..5a20de2e2 --- /dev/null +++ b/env.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export NAMESPACE=test +export HELM_REPO_PATH=../../../../helm-chart +export KUTTL_SPLUNK_OPERATOR_IMAGE=docker.io/tgarg1701/splunk-operator:2.4.0 +export KUTTL_SPLUNK_ENTERPRISE_IMAGE=docker.io/splunk/splunk:9.0.3-a2 +export KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE=docker.io/splunk/splunk:9.0.5 +export AWS_DEFAULT_REGION=us-west-2 diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index a152a8423..d8ecc7336 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 500 +timeout: 5000 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git 
a/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml index a11eefac7..f5689caa5 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} --namespace ${NAMESPACE} namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml new file mode 100644 index 000000000..731366343 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -0,0 +1,24 @@ +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..c3c560798 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,40 @@ +--- +# assert for 
SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml new file mode 100644 index 000000000..4d5aadaf4 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -0,0 +1,24 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml new file mode 100644 index 000000000..0a24c9a2d --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-c3 -n --namespace ${NAMESPACE} + namespaced: true diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 
8ff920be8..3c3506886 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -225,3 +226,39 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } + +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. +func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} + err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterMasterInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image + + clusterMasterInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterMasterInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From 3c75c998b8aa555156e6df3323d625b61b3b9bef Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 14:53:38 -0700 Subject: [PATCH 06/52] Fixed uninstall kuttl test --- kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml index 0a24c9a2d..abb75c68d 100644 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -1,5 +1,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm uninstall splunk-c3 -n --namespace ${NAMESPACE} + - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} namespaced: true From ee474fc417ecb984454b6e6b32223b5ebf5466ba Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 16:40:45 -0700 Subject: [PATCH 07/52] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 35 ++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 37 -------------------- pkg/splunk/enterprise/licensemanager_test.go | 14 +++++++- 3 files changed, 48 insertions(+), 38 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index d45206475..0967620bb 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -31,6 +31,7 @@ import ( splutil "github.com/splunk/splunk-operator/pkg/splunk/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -434,3 +435,37 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, } return extraEnv, err } + +func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && 
k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 06ca95316..828a169d5 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,7 +26,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -228,39 +227,3 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } - -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. 
-func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index dbdaf153c..a476e202d 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -57,6 +57,7 @@ func TestApplyLicenseManager(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-stack1-license-manager-secret-v1"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, + {MetaName: "*v4.ClusterManager-test-"}, {MetaName: "*v4.LicenseManager-test-stack1"}, {MetaName: "*v4.LicenseManager-test-stack1"}, } @@ -73,7 +74,7 @@ func TestApplyLicenseManager(t *testing.T) { {ListOpts: listOpts}} createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateFuncCalls := 
[]spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} + updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12], funcCalls[13]} updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ @@ -719,6 +720,17 @@ func TestLicenseManagerList(t *testing.T) { } } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + lm := enterpriseApi.LicenseManager{} + + client := spltest.NewMockClient() + err := changeClusterManagerAnnotations(ctx, client, &lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } +} + func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} From 35a2eb06ed71fb2e9a53cdd0cd253f4d69a94aee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 14:40:57 -0700 Subject: [PATCH 08/52] Removed changeAnnotation from licenseMaster --- pkg/splunk/enterprise/clustermanager.go | 2 ++ pkg/splunk/enterprise/licensemaster.go | 37 ------------------------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 0967620bb..5d6dd6e1d 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -436,6 +436,8 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// on update, and returns error if something is 
wrong. func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { namespacedName := types.NamespacedName{ diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 3c3506886..8ff920be8 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,7 +25,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -226,39 +225,3 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } - -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. -func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} - err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterMasterInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image - - clusterMasterInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterMasterInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From 
d9540fd7bf62dd9abbdb5ae36fa10c5c2ac2cf55 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 15:11:11 -0700 Subject: [PATCH 09/52] Added branch in int-tests --- .github/workflows/helm-test-workflow.yml | 1 + .github/workflows/int-test-workflow.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index e68dc44d7..d2e9b7aff 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - CSPL-2094-LM-upgrade-strategy - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3dd4eed22..25a85105a 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - CSPL-2094-LM-upgrade-strategy - develop - main - feature** From 5a17a5ffe71d54d75d5c97e2e53fddde0b98e7e3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 16:42:05 -0700 Subject: [PATCH 10/52] Completed code coverage tests --- pkg/splunk/enterprise/clustermanager.go | 12 +++- pkg/splunk/enterprise/clustermanager_test.go | 61 ++++++++++++++++++++ pkg/splunk/enterprise/licensemanager_test.go | 11 ---- 3 files changed, 70 insertions(+), 14 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5d6dd6e1d..7d9db1a62 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -436,19 +436,24 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// changeClusterMasterAnnotations updates the checkUpdateImage field of the Cluster Master Annotations to trigger 
the reconcile loop // on update, and returns error if something is wrong. func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + err := client.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -459,12 +464,13 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } } + // create/update the checkUpdateImage annotation field annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image clusterManagerInstance.SetAnnotations(annotations) err = client.Update(ctx, clusterManagerInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 190e23f75..6c994f00b 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1384,6 +1384,67 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + lm := &enterpriseApi.LicenseManager{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-lm", + Namespace: "default", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test-cm", + }, + }, + }, + } + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + cm.Spec.Image = "splunk/splunk:latest" + + client := spltest.NewMockClient() + + client.Create(ctx, lm) + client.Create(ctx, cm) + + err := changeClusterManagerAnnotations(ctx, client, lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, clusterManager) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + annotations := clusterManager.GetAnnotations() + if annotations["checkUpdateImage"] != cm.Spec.Image { + t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + } + +} + func TestClusterManagerWitReadyState(t *testing.T) { // create directory for app framework newpath := filepath.Join("/tmp", "appframework") diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index a476e202d..8c7d597c9 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,17 +720,6 @@ func TestLicenseManagerList(t *testing.T) { } } -func 
TestChangeClusterManagerAnnotations(t *testing.T) { - ctx := context.TODO() - lm := enterpriseApi.LicenseManager{} - - client := spltest.NewMockClient() - err := changeClusterManagerAnnotations(ctx, client, &lm) - if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) - } -} - func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} From 706dc96aa9c9a323fe333a940bc89ec6ed534605 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 13:22:58 -0700 Subject: [PATCH 11/52] Added upgradeScenario and related methods for CM --- pkg/splunk/enterprise/clustermanager.go | 41 +++++++++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 9 ++++++ 2 files changed, 50 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 7d9db1a62..dad71072b 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -178,6 +178,14 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } + checkUpgradeReady, err := upgradeScenario(ctx, client, cr) + if err != nil { + return result, err + } + if !checkUpgradeReady { + return result, err + } + clusterManagerManager := splctrl.DefaultStatefulSetPodManager{} phase, err := clusterManagerManager.Update(ctx, client, statefulSet, 1) if err != nil { @@ -475,5 +483,38 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } return nil +} + +func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { + + licenseManagerRef := cr.Spec.LicenseManagerRef + namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} + + // create new object + licenseManager := &enterpriseApi.LicenseManager{} + + // get the license manager referred in cluster manager + err := c.Get(ctx, namespacedName, 
licenseManager) + if err != nil { + return false, err + } + + lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) + cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + + if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + + return false, nil +} + +func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { + statefulSet, err := getClusterManagerStatefulSet(ctx, c, cr) + if err != nil { + return "", err + } + image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + return image, nil } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 828a169d5..a274e4914 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -227,3 +227,12 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } +func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { + statefulSet, err := getLicenseManagerStatefulSet(ctx, c, cr) + if err != nil { + return "", err + } + image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + + return image, nil +} From c5af670586bf74ce306ffcbbeb4579d1511d9c7b Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 15:43:57 -0700 Subject: [PATCH 12/52] Added label selectors to get Current Image --- pkg/splunk/enterprise/clustermanager.go | 40 +++++++++++++++++++++++-- pkg/splunk/enterprise/licensemanager.go | 38 +++++++++++++++++++++-- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index dad71072b..53494167e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -19,6 +19,7 @@ 
import ( "context" "fmt" "reflect" + "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -32,7 +33,10 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -510,11 +514,41 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente } func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { - statefulSet, err := getClusterManagerStatefulSet(ctx, c, cr) + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) if err != nil { return "", err } - image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) + if err != nil { + return "", err + } + + statefulsetPods := &corev1.PodList{} + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + rclient.MatchingLabelsSelector{Selector: labelSelector}, + } + + err = c.List(ctx, statefulsetPods, opts...) 
+ if err != nil { + return "", err + } + + for _, v := range statefulsetPods.Items { + for _, container := range v.Spec.Containers { + if strings.Contains(container.Name, "splunk") { + image := container.Image + return image, nil + } + + } + } - return image, nil + return "", nil } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index a274e4914..b81241892 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -26,8 +27,10 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -228,11 +231,40 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { - statefulSet, err := getLicenseManagerStatefulSet(ctx, c, cr) + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + return "", err + } + labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { return "", err } - image := statefulSet.Spec.Template.Spec.InitContainers[0].Image - return image, nil + statefulsetPods := &corev1.PodList{} + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + rclient.MatchingLabelsSelector{Selector: labelSelector}, + } + + err = c.List(ctx, 
statefulsetPods, opts...) + if err != nil { + return "", err + } + + for _, v := range statefulsetPods.Items { + for _, container := range v.Spec.Containers { + if strings.Contains(container.Name, "splunk") { + image := container.Image + return image, nil + } + + } + } + + return "", nil } From 237ecdf7d3e3dd7e57ea0b855b4091833f0ad0ba Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 16:24:18 -0700 Subject: [PATCH 13/52] Changed pod.Spec to pod.Status --- pkg/splunk/enterprise/clustermanager.go | 2 +- pkg/splunk/enterprise/licensemanager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 53494167e..44fb43f60 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -541,7 +541,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl } for _, v := range statefulsetPods.Items { - for _, container := range v.Spec.Containers { + for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { image := container.Image return image, nil diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index b81241892..3f99e56de 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -257,7 +257,7 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl } for _, v := range statefulsetPods.Items { - for _, container := range v.Spec.Containers { + for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { image := container.Image return image, nil From 5966d8726df92f7b1652e4fe8d0b8b9132149078 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 12:17:17 -0700 Subject: [PATCH 14/52] Added changeAnnotations for MC --- pkg/splunk/enterprise/clustermanager.go | 5 +++ 
pkg/splunk/enterprise/monitoringconsole.go | 36 ++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 44fb43f60..6cb60e281 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -506,6 +506,7 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + // check conditions for upgrade if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { return true, nil } @@ -513,6 +514,8 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente return false, nil } +// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, +// returns the image, and error if something goes wring func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { namespacedName := types.NamespacedName{ @@ -529,6 +532,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", err } + // get a list of all pods in the namespace with matching labels as the statefulset statefulsetPods := &corev1.PodList{} opts := []rclient.ListOption{ rclient.InNamespace(cr.GetNamespace()), @@ -540,6 +544,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", err } + // find the container with the phrase 'splunk' in it for _, v := range statefulsetPods.Items { for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 8979877de..a1fb3e2b9 100644 --- 
a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -355,3 +355,39 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } } + +// changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. +func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.MonitoringConsoleRef.Name, + } + monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} + err := client.Get(context.TODO(), namespacedName, monitoringConsoleInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := monitoringConsoleInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == monitoringConsoleInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = monitoringConsoleInstance.Spec.Image + + monitoringConsoleInstance.SetAnnotations(annotations) + err = client.Update(ctx, monitoringConsoleInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From b827bd9fe73d4ecacc6ebfd459fa0540821e4dee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 14:27:00 -0700 Subject: [PATCH 15/52] Added kuttl test cases --- env.sh | 7 ------- kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 | 5 +++++ 2 files changed, 5 insertions(+), 7 deletions(-) mode change 100755 => 100644 env.sh create mode 100644 kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 diff --git a/env.sh b/env.sh old mode 100755 new mode 100644 index 5a20de2e2..f1f641af1 --- a/env.sh +++ b/env.sh @@ -1,8 +1 @@ 
#!/usr/bin/env bash - -export NAMESPACE=test -export HELM_REPO_PATH=../../../../helm-chart -export KUTTL_SPLUNK_OPERATOR_IMAGE=docker.io/tgarg1701/splunk-operator:2.4.0 -export KUTTL_SPLUNK_ENTERPRISE_IMAGE=docker.io/splunk/splunk:9.0.3-a2 -export KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE=docker.io/splunk/splunk:9.0.5 -export AWS_DEFAULT_REGION=us-west-2 diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 new file mode 100644 index 000000000..95f8297ca --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} + namespaced: true \ No newline at end of file From a1159a80f5634b45ff56084719ac6e82c0329f31 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 11:03:00 -0700 Subject: [PATCH 16/52] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 8 +++++++- pkg/splunk/enterprise/clustermanager_test.go | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 6cb60e281..6fffb032b 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -500,11 +500,17 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // get the license manager referred in cluster manager err := c.Get(ctx, namespacedName, licenseManager) if err != nil { - return false, err + return true, nil } lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) + if err != nil { + return false, err + } cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + if err != nil { + return false, err + } // check conditions for upgrade if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { diff --git 
a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 6c994f00b..2bb8d83ab 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -66,6 +66,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -81,6 +82,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, From 3aa032b210e55268189ba91caa10c135e6f5850a Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 11:23:02 -0700 Subject: [PATCH 17/52] Fixed SmartStore unit test --- pkg/splunk/enterprise/clustermanager_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 2bb8d83ab..4ff101c9c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -498,6 +498,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: 
"*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, @@ -519,6 +520,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, From f0e73c8e8bac371a6401c5c50325b7fa9df7503e Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 13:32:02 -0700 Subject: [PATCH 18/52] Added code coverage test --- pkg/splunk/enterprise/clustermanager_test.go | 146 ++++++++++++++----- 1 file changed, 112 insertions(+), 34 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 4ff101c9c..4c5c1c15f 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1388,65 +1388,143 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } -func TestChangeClusterManagerAnnotations(t *testing.T) { +func TestUpgradeScenario(t *testing.T) { + ctx := context.TODO() - lm := &enterpriseApi.LicenseManager{ + cm := enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-lm", - Namespace: "default", + Name: "stack1", + Namespace: "test", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "clustermanager", + }, + } + lm := enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + 
Namespace: "test", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "LicenseManager", + }, + } + fmt.Println(ctx, cm, lm) + +} + +func TestGetClusterManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", }, - Spec: enterpriseApi.LicenseManagerSpec{ + Spec: enterpriseApi.ClusterManagerSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: enterpriseApi.Spec{ ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test-cm", + MonitoringConsoleRef: corev1.ObjectReference{ + Name: "mcName", }, }, }, } - cm := &enterpriseApi.ClusterManager{ + replicas := int32(1) + statefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", + Name: "splunk-test-cluster-manager", + Namespace: "test", }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-cluster-manager-headless", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, }, - Volumes: []corev1.Volume{}, }, + Replicas: &replicas, }, } - cm.Spec.Image = "splunk/splunk:latest" - + matchlabels := map[string]string{ + "app": "test", + "tier": "splunk", + } + statefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: matchlabels, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cluster-manager-headless", + Namespace: "test", + }, + } + // current.Spec.Image = "splunk/test" client := spltest.NewMockClient() + err := client.Create(ctx, service) + err = client.Create(ctx, statefulset) + err = client.Create(ctx, ¤t) + _, err = 
ApplyClusterManager(ctx, client, ¤t) + fmt.Println(err) - client.Create(ctx, lm) - client.Create(ctx, cm) - - err := changeClusterManagerAnnotations(ctx, client, lm) - if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) - } - clusterManager := &enterpriseApi.ClusterManager{} - namespacedName := types.NamespacedName{ - Name: cm.Name, - Namespace: cm.Namespace, + stpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cluster-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, } - err = client.Get(ctx, namespacedName, clusterManager) + // simulate create stateful set + err = client.Create(ctx, stpod) if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() } - annotations := clusterManager.GetAnnotations() - if annotations["checkUpdateImage"] != cm.Spec.Image { - t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + // update statefulset + stpod.Status.Phase = corev1.PodRunning + stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, } - + err = client.Status().Update(ctx, stpod) + if err != nil { + t.Errorf("Unexpected update statefulset %v", err) + debug.PrintStack() + } + image, err := getClusterManagerCurrentImage(ctx, client, ¤t) + fmt.Println(image) + fmt.Println(err) } func TestClusterManagerWitReadyState(t *testing.T) { From 53f6f68833d2f809f2a37eed83a540276c76a8c0 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 27 Jun 2023 09:31:40 -0700 Subject: [PATCH 19/52] using fake client instead of mock 
--- pkg/splunk/enterprise/clustermanager_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 4c5c1c15f..f51abbe6c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1474,7 +1474,11 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { }, } // current.Spec.Image = "splunk/test" - client := spltest.NewMockClient() + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + //client := spltest.NewMockClient() err := client.Create(ctx, service) err = client.Create(ctx, statefulset) err = client.Create(ctx, ¤t) From 8301fd997521803dcb240f89730777e076a9bff5 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 27 Jun 2023 09:42:42 -0700 Subject: [PATCH 20/52] removed creating statefulset and service --- pkg/splunk/enterprise/clustermanager_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index f51abbe6c..5d9425a8c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1467,21 +1467,21 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { statefulset.Spec.Selector = &metav1.LabelSelector{ MatchLabels: matchlabels, } - service := &corev1.Service{ + /*service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager-headless", Namespace: "test", }, - } + } */ // current.Spec.Image = "splunk/test" builder := fake.NewClientBuilder() client := builder.Build() utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) //client := spltest.NewMockClient() - err := client.Create(ctx, service) - err = client.Create(ctx, statefulset) - err = 
client.Create(ctx, ¤t) + //err := client.Create(ctx, service) + //err = client.Create(ctx, statefulset) + err := client.Create(ctx, ¤t) _, err = ApplyClusterManager(ctx, client, ¤t) fmt.Println(err) From 8353d25e969b96ab2e4a4a000c53dba63e850298 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 15:55:16 -0700 Subject: [PATCH 21/52] Corrected LMCurrentImage method --- pkg/splunk/enterprise/licensemanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 3f99e56de..cad24ad11 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -233,7 +233,7 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), } statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) From dab93db40c1fa8fe6051bb8300c639561d82990f Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 27 Jun 2023 15:08:50 -0700 Subject: [PATCH 22/52] Completed Coverage tests for CM --- pkg/splunk/enterprise/clustermanager.go | 28 +++ pkg/splunk/enterprise/clustermanager_test.go | 231 ++++++++++++++++--- pkg/splunk/enterprise/licensemanager.go | 13 ++ pkg/splunk/enterprise/licensemanager_test.go | 92 ++++++++ pkg/splunk/enterprise/monitoringconsole.go | 7 +- 5 files changed, 334 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 6fffb032b..dd569d333 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -186,6 +186,11 @@ func 
ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, if err != nil { return result, err } + + // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into + // an infinite loop and gives Time Out. We still want the other functions to run if + // a proper upgrade does not happen + if !checkUpgradeReady { return result, err } @@ -491,6 +496,10 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + licenseManagerRef := cr.Spec.LicenseManagerRef namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} @@ -505,10 +514,14 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) if err != nil { + eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to licenseManager current image") return false, err } cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) if err != nil { + eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the Cluster Manager Image. 
Reason %v", err)) + scopedLog.Error(err, "Unable to clusterManager current image") return false, err } @@ -517,6 +530,11 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente return true, nil } + // Temporary workaround to keep the clusterManager method working only when the LM is ready + if licenseManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + return false, nil } @@ -524,6 +542,10 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // returns the image, and error if something goes wring func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getClusterManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), @@ -531,10 +553,14 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) + scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) + scopedLog.Error(err, "Unable to get labels") return "", err } @@ -547,6 +573,8 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl err = c.List(ctx, statefulsetPods, opts...) 
if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Pod list. Reason %v", err)) + scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 5d9425a8c..705823f68 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1391,32 +1391,99 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { func TestUpgradeScenario(t *testing.T) { ctx := context.TODO() - cm := enterpriseApi.ClusterManager{ + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create License Manager + lm := enterpriseApi.LicenseManager{ ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", + Name: "test", Namespace: "test", }, - TypeMeta: metav1.TypeMeta{ - Kind: "clustermanager", + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, }, } - lm := enterpriseApi.LicenseManager{ + + err := client.Create(ctx, &lm) + _, err = ApplyLicenseManager(ctx, client, &lm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + lm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &lm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + // get StatefulSet labels + + namespacedName := types.NamespacedName{ + Namespace: lm.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, lm.GetName()), + } + lmstatefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, lmstatefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels 
:= lmstatefulSet.Spec.Template.ObjectMeta.Labels + + // create LM pod + lmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", + Name: "splunk-test-license-manager-0", Namespace: "test", }, - TypeMeta: metav1.TypeMeta{ - Kind: "LicenseManager", + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, }, } - fmt.Println(ctx, cm, lm) - -} + lmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } -func TestGetClusterManagerCurrentImage(t *testing.T) { + // update pod + lmstpod.Status.Phase = corev1.PodRunning + lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } - ctx := context.TODO() - current := enterpriseApi.ClusterManager{ + // Create Cluster Manager + cm := enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "test", @@ -1425,16 +1492,21 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: enterpriseApi.Spec{ ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", }, Volumes: []corev1.Volume{}, - MonitoringConsoleRef: corev1.ObjectReference{ - Name: "mcName", + LicenseManagerRef: corev1.ObjectReference{ + Name: "test", }, }, }, } replicas := int32(1) - statefulset := &appsv1.StatefulSet{ + labels = map[string]string{ + "app": "test", + "tier": "splunk", + } + cmstatefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager", Namespace: "test", @@ -1460,30 +1532,114 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { 
Replicas: &replicas, }, } - matchlabels := map[string]string{ - "app": "test", - "tier": "splunk", + cmstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, } - statefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: matchlabels, + + err = client.Create(ctx, &cm) + err = client.Create(ctx, cmstatefulset) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - /*service := &corev1.Service{ + + // Create CM pod + cmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-headless", + Name: "splunk-test-cluster-manager-0", Namespace: "test", }, - } */ - // current.Spec.Image = "splunk/test" + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + cmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update CM pod + cmstpod.Status.Phase = corev1.PodRunning + cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + cm.Spec.Image = "splunk2" + lmstpod.Status.ContainerStatuses[0].Image = "splunk2" + err = client.Status().Update(ctx, lmstpod) + check, err := upgradeScenario(ctx, client, &cm) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("upgradeScenario: CM should be ready for upgrade") + } + +} + +func TestGetClusterManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } builder := fake.NewClientBuilder() client := builder.Build() utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - //client := spltest.NewMockClient() - //err := client.Create(ctx, service) - //err = client.Create(ctx, statefulset) err := client.Create(ctx, ¤t) _, err = ApplyClusterManager(ctx, client, ¤t) - fmt.Println(err) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + namespacedName := types.NamespacedName{ + Namespace: current.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, current.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, statefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := statefulSet.Spec.Template.ObjectMeta.Labels stpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1505,7 +1661,8 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { }, }, } - // simulate create stateful set + stpod.ObjectMeta.Labels = labels + // simulate create pod err = client.Create(ctx, stpod) if err != nil { t.Errorf("Unexpected create pod failed %v", err) @@ -1523,12 +1680,18 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { } err = client.Status().Update(ctx, stpod) if err != nil { - t.Errorf("Unexpected update statefulset %v", err) + t.Errorf("Unexpected update pod %v", err) debug.PrintStack() } + image, err := getClusterManagerCurrentImage(ctx, client, ¤t) - fmt.Println(image) - fmt.Println(err) + + if err != nil { + t.Errorf("Unexpected getClusterManagerCurrentImage error %v", err) + } + if image != stpod.Status.ContainerStatuses[0].Image { + t.Errorf("getClusterManagerCurrentImage does not return the current pod 
image") + } } func TestClusterManagerWitReadyState(t *testing.T) { diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index cad24ad11..60d8a95a9 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -231,6 +231,11 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getLicenseManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), @@ -238,13 +243,18 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) + scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) + scopedLog.Error(err, "Unable to get labels") return "", err } + // get a list of all pods in the namespace with matching labels as the statefulset statefulsetPods := &corev1.PodList{} opts := []rclient.ListOption{ rclient.InNamespace(cr.GetNamespace()), @@ -253,9 +263,12 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl err = c.List(ctx, statefulsetPods, opts...) 
if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Pod list. Reason %v", err)) + scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } + // find the container with the phrase 'splunk' in it for _, v := range statefulsetPods.Items { for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 8c7d597c9..25ffd6f0b 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,6 +720,98 @@ func TestLicenseManagerList(t *testing.T) { } } +func TestGetLicenseManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyLicenseManager(ctx, client, ¤t) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + + namespacedName := types.NamespacedName{ + Namespace: current.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, current.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, statefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := statefulSet.Spec.Template.ObjectMeta.Labels + + stpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-license-manager-0", + Namespace: 
"test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + stpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, stpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update statefulset + stpod.Status.Phase = corev1.PodRunning + stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, stpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + image, err := getLicenseManagerCurrentImage(ctx, client, ¤t) + + if err != nil { + t.Errorf("Unexpected getLicenseManagerCurrentImage error %v", err) + } + if image != stpod.Status.ContainerStatuses[0].Image { + t.Errorf("getLicenseManagerCurrentImage does not return the current pod image") + } +} + func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a1fb3e2b9..a042a0c6f 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -358,7 +358,7 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor // changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
-func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { +func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), @@ -369,17 +369,18 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co if err != nil && k8serrors.IsNotFound(err) { return nil } + image, _ := getClusterManagerCurrentImage(ctx, client, cr) annotations := monitoringConsoleInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} } if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == monitoringConsoleInstance.Spec.Image { + if annotations["checkUpdateImage"] == image { return nil } } - annotations["checkUpdateImage"] = monitoringConsoleInstance.Spec.Image + annotations["checkUpdateImage"] = image monitoringConsoleInstance.SetAnnotations(annotations) err = client.Update(ctx, monitoringConsoleInstance) From fb778805353597169558fd7a8eb72b71f0238023 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 10:28:17 -0700 Subject: [PATCH 23/52] Refined changeClusterManagerAnnotations --- pkg/splunk/enterprise/licensemanager.go | 56 +++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 60d8a95a9..ce58aa825 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -27,6 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -281,3 +282,58 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } + +// 
func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { + +// namespacedName := types.NamespacedName{ +// Namespace: cr.GetNamespace(), +// Name: cr.Spec.ClusterManagerRef.Name, +// } +// clusterManagerInstance := &enterpriseApi.ClusterManager{} +// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) +// if err != nil && k8serrors.IsNotFound(err) { +// return false, nil +// } +// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { +// return true, nil +// } + +// return true, err + +// } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. +func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From b6c70d54b97f5b2874055e50eaae66ce807f4b29 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 15 Jun 2023 
15:05:56 -0700 Subject: [PATCH 24/52] test case for upgrade scenario --- .../upgrade/c3-with-operator/01-assert.yaml | 93 +++++++++- .../upgrade/c3-with-operator/02-assert.yaml | 2 +- .../upgrade/c3-with-operator/03-assert.yaml | 35 +--- .../03-upgrade-splunk-image.yaml | 6 + .../upgrade/c3-with-operator/04-assert.yaml | 174 +++++++++++++++++- 5 files changed, 278 insertions(+), 32 deletions(-) create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index 4b09ebf54..dce36af8b 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,4 +6,95 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 \ No newline at end of file + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + 
phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml index 731366343..59008dd62 100644 --- a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -21,4 +21,4 @@ status: apiVersion: v1 kind: Secret metadata: - name: splunk-cm-cluster-manager-secret-v1 \ No newline at end of file + name: splunk-cm-cluster-manager-secret-v1 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml index c3c560798..84b4ee495 100644 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -1,40 +1,17 @@ --- -# assert for SearchHeadCluster custom resource to be ready +# assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster +kind: IndexerCluster metadata: - name: shc + name: idxc status: phase: Ready --- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for 
statefull set +# check for stateful sets and replicas updated apiVersion: apps/v1 kind: StatefulSet metadata: - name: splunk-shc-deployer + name: splunk-idxc-indexer status: - replicas: 1 \ No newline at end of file + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml new file mode 100644 index 000000000..a11eefac7 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml index 4d5aadaf4..4f883ab81 100644 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -1,3 +1,38 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + --- # assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 @@ -21,4 +56,141 @@ status: apiVersion: v1 kind: Secret 
metadata: - name: splunk-idxc-indexer-secret-v1 \ No newline at end of file + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-cm-cluster-manager-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + 
name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-deployer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true From 6957966f146c2a7ad4b4fab498508a53dd4b8328 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 10:31:37 -0700 Subject: [PATCH 25/52] Modified kuttl cases --- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../upgrade/c3-with-operator/01-assert.yaml | 93 +------------------ .../upgrade/c3-with-operator/03-assert.yaml | 17 ---- .../03-upgrade-splunk-image.yaml | 6 -- pkg/splunk/enterprise/licensemanager.go | 19 ---- 5 files changed, 2 insertions(+), 135 deletions(-) delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index d8ecc7336..a152a8423 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 5000 +timeout: 500 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index dce36af8b..4b09ebf54 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,95 +6,4 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if 
stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 + availableReplicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml deleted file mode 100644 index a11eefac7..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} - namespaced: true \ No newline at end of file diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index ce58aa825..418bea803 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -283,25 +283,6 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } -// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { - -// namespacedName := types.NamespacedName{ -// Namespace: cr.GetNamespace(), -// Name: cr.Spec.ClusterManagerRef.Name, -// } -// clusterManagerInstance := &enterpriseApi.ClusterManager{} -// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) -// if err != nil && k8serrors.IsNotFound(err) { -// return false, nil -// } -// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { -// return true, nil -// } - -// return true, err - -// } - // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { From 70c73c299666a504e05311b222fc91b3878e0c6c Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 10:59:44 -0700 Subject: [PATCH 26/52] Added kuttl tests; Updated LicenseMaster --- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../upgrade/c3-with-operator/03-assert.yaml | 40 ++++ .../upgrade/c3-with-operator/04-assert.yaml | 172 ------------------ pkg/splunk/enterprise/licensemaster.go | 37 ++++ 4 files changed, 78 insertions(+), 173 deletions(-) create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index a152a8423..d8ecc7336 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 500 +timeout: 5000 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..c3c560798 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,40 @@ +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
splunk-shc-deployer +status: + replicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml index 4f883ab81..368902426 100644 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -1,38 +1,3 @@ ---- -# assert for splunk operator pod to be ready -apiVersion: apps/v1 -kind: Deployment -metadata: - name: splunk-operator-controller-manager -status: - readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - --- # assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 @@ -57,140 +22,3 @@ apiVersion: v1 kind: Secret metadata: name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod 
-metadata: - name: splunk-idxc-indexer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-cm-cluster-manager-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-deployer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 8ff920be8..3c3506886 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -225,3 +226,39 @@ func 
getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } + +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. +func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} + err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterMasterInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image + + clusterMasterInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterMasterInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From 4a945eb3fa281e5b624ab77ab78e49a10fa10bf5 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 16:40:45 -0700 Subject: [PATCH 27/52] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 37 +++++++++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 37 ------------------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index dd569d333..c08f4ea52 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -34,6 +34,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + 
k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" rclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -591,3 +592,39 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong +func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 418bea803..60d8a95a9 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -27,7 +27,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -282,39 +281,3 
@@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } - -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. -func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From a2c9f6df46a5c326553f9ef5b49d21135a94dff2 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 14:40:57 -0700 Subject: [PATCH 28/52] Removed changeAnnotation from licenseMaster --- pkg/splunk/enterprise/licensemaster.go | 37 -------------------------- 1 file changed, 37 deletions(-) diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 3c3506886..8ff920be8 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,7 +25,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -226,39 +225,3 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } - -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. -func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} - err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterMasterInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image - - clusterMasterInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterMasterInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From 86baa2177bb51e5f6b707526903e05d7472fd212 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 16:42:05 -0700 Subject: [PATCH 29/52] Completed code coverage tests --- pkg/splunk/enterprise/clustermanager.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index c08f4ea52..947419f80 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -597,15 +597,20 @@ func getClusterManagerCurrentImage(ctx 
context.Context, c splcommon.ControllerCl // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + err := client.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -616,12 +621,13 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } } + // create/update the checkUpdateImage annotation field annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image clusterManagerInstance.SetAnnotations(annotations) err = client.Update(ctx, clusterManagerInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) return err } From b24fca74cc2cbbb92515e4892ff0be5219fce128 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 28 Jun 2023 11:17:03 -0700 Subject: [PATCH 30/52] Resolved all conflict issues --- env.sh | 1 - .../upgrade/c3-with-operator/05-uninstall-c3 | 5 - .../c3-with-operator/05-uninstall-c3.yaml | 2 +- pkg/splunk/enterprise/clustermanager.go | 17 +- pkg/splunk/enterprise/clustermanager_test.go | 163 ++++++++++++++++++ 5 files changed, 174 insertions(+), 14 deletions(-) delete mode 100644 env.sh delete mode 100644 
kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 diff --git a/env.sh b/env.sh deleted file mode 100644 index f1f641af1..000000000 --- a/env.sh +++ /dev/null @@ -1 +0,0 @@ -#!/usr/bin/env bash diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 deleted file mode 100644 index 95f8297ca..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} - namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml index abb75c68d..cf9d19cf8 100644 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -1,5 +1,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm uninstall splunk-c3--namespace ${NAMESPACE} + - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} namespaced: true diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 947419f80..077a566e3 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -191,7 +191,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into // an infite loop and ives Time Out. 
We still want the other functions to run if // a proper upgrade does not happen - if !checkUpgradeReady { return result, err } @@ -595,39 +594,43 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong -func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { +func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(ctx, namespacedName, clusterManagerInstance) + err := c.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + image, _ := getLicenseManagerCurrentImage(ctx, c, cr) + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} } if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + if annotations["checkUpdateImage"] == image { return nil } } // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + annotations["checkUpdateImage"] = image clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) + err = c.Update(ctx, clusterManagerInstance) if err 
!= nil { - scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) + scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 705823f68..c694ca9bd 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1694,6 +1694,169 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { } } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + + // define LM and CM + lm := &enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lm", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test-cm", + }, + }, + }, + } + replicas := int32(1) + labels := map[string]string{ + "app": "test", + "tier": "splunk", + } + lmstatefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-lm-license-manager", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-lm-license-manager-headless", + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + }, + Replicas: &replicas, + }, + } + lmstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-cm", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + lm.Spec.Image = "splunk/splunk:latest" + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create the instances + client.Create(ctx, lm) + client.Create(ctx, lmstatefulset) + _, err := ApplyLicenseManager(ctx, client, lm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + lm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, lm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + client.Create(ctx, cm) + _, err = ApplyClusterManager(ctx, client, cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + // create LM pod + lmstpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-license-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + lmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update pod + lmstpod.Status.Phase = corev1.PodRunning + lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + err = changeClusterManagerAnnotations(ctx, client, lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations 
should not have returned error=%v", err) + } + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, clusterManager) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + annotations := clusterManager.GetAnnotations() + if annotations["checkUpdateImage"] != lm.Spec.Image { + t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + } + +} + func TestClusterManagerWitReadyState(t *testing.T) { // create directory for app framework newpath := filepath.Join("/tmp", "appframework") From 1d0cc579fab6583c264a97ecc034e9a33132fd43 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 28 Jun 2023 11:37:36 -0700 Subject: [PATCH 31/52] Added comments --- pkg/splunk/enterprise/clustermanager.go | 43 +------------------- pkg/splunk/enterprise/clustermanager_test.go | 2 +- 2 files changed, 2 insertions(+), 43 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 077a566e3..7b037d73e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -34,7 +34,6 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" rclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -453,47 +452,7 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// changeClusterMasterAnnotations updates the checkUpdateImage field of the Cluster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. 
-func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - - // fetch and check the annotation fields of the ClusterManager - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) - return err - } - - return nil -} - +// upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { reqLogger := log.FromContext(ctx) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index c694ca9bd..afc627149 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1795,7 +1795,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { // create LM pod 
lmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", + Name: "splunk-test-lm-license-manager-0", Namespace: "test", }, Spec: corev1.PodSpec{ From 10cc0b6bd950dad46c98587542ddcb2a960b1d53 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 30 Jun 2023 09:52:49 -0700 Subject: [PATCH 32/52] Updated upgradeScenario to check if statefulSet exists --- pkg/splunk/enterprise/clustermanager.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 7b037d73e..eb9534def 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -459,14 +459,26 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + + // check if the stateful set is created at this instance + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil && k8serrors.IsNotFound(err) { + return true, nil + } + licenseManagerRef := cr.Spec.LicenseManagerRef - namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} // create new object licenseManager := &enterpriseApi.LicenseManager{} // get the license manager referred in cluster manager - err := c.Get(ctx, namespacedName, licenseManager) + err = c.Get(ctx, namespacedName, licenseManager) if err != nil { return true, nil } From 27ddd678f2b2d3e082f6898f54283f5da4b6e3c0 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 30 Jun 2023 10:18:23 -0700 
Subject: [PATCH 33/52] Fixed Unit tests --- pkg/splunk/enterprise/clustermanager_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index afc627149..99a524e82 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -66,7 +66,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -82,6 +82,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, @@ -498,6 +499,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, @@ -520,6 +522,7 @@ 
func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, From 0378e943eb726c287b6ccd646c3deab06a054b60 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 12:58:00 -0700 Subject: [PATCH 34/52] Added common APIs, changed upgrade condition --- Makefile | 2 +- pkg/splunk/enterprise/clustermanager.go | 129 ++------- pkg/splunk/enterprise/clustermanager_test.go | 264 ++----------------- pkg/splunk/enterprise/licensemanager.go | 54 ---- pkg/splunk/enterprise/licensemanager_test.go | 92 ------- pkg/splunk/enterprise/monitoringconsole.go | 24 +- pkg/splunk/enterprise/util.go | 47 ++++ pkg/splunk/enterprise/util_test.go | 40 +++ 8 files changed, 140 insertions(+), 512 deletions(-) diff --git a/Makefile b/Makefile index aef47f310..dd59513ae 100644 --- a/Makefile +++ b/Makefile @@ -137,7 +137,7 @@ build: setup/ginkgo manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go -docker-build: #test ## Build docker image with the manager. +docker-build: test ## Build docker image with the manager. docker build -t ${IMG} . docker-push: ## Push docker image with the manager. 
diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index eb9534def..3fb52fab3 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "reflect" - "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -34,9 +33,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -182,15 +179,8 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } - checkUpgradeReady, err := upgradeScenario(ctx, client, cr) - if err != nil { - return result, err - } - - // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into - // an infite loop and ives Time Out. 
We still want the other functions to run if - // a proper upgrade does not happen - if !checkUpgradeReady { + continueReconcile, err := isClusterManagerReadyForUpgrade(ctx, client, cr) + if err != nil || !continueReconcile { return result, err } @@ -452,11 +442,11 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly -func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { +// isClusterManagerReadyForUpgrade checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly +func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) namespacedName := types.NamespacedName{ @@ -472,6 +462,10 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente } licenseManagerRef := cr.Spec.LicenseManagerRef + if licenseManagerRef.Name == "" { + return true, nil + } + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} // create new object @@ -480,90 +474,35 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // get the license manager referred in cluster manager err = c.Get(ctx, namespacedName, licenseManager) if err != nil { - return true, nil + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the License 
Manager. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager") + return true, err } - lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) - if err != nil { - eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) - scopedLog.Error(err, "Unable to licenseManager current image") - return false, err - } - cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) if err != nil { - eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) - scopedLog.Error(err, "Unable to clusterManager current image") + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") return false, err } // check conditions for upgrade - if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { - return true, nil - } - - // Temporary workaround to keep the clusterManager method working only when the LM is ready - if licenseManager.Status.Phase == enterpriseApi.PhaseReady { - return true, nil - } - - return false, nil -} - -// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, -// returns the image, and error if something goes wring -func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("getClusterManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - eventPublisher, _ := newK8EventPublisher(c, cr) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: 
GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err := c.Get(ctx, namespacedName, statefulSet) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) - scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) - scopedLog.Error(err, "Unable to get labels") - return "", err - } - - // get a list of all pods in the namespace with matching labels as the statefulset - statefulsetPods := &corev1.PodList{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), - rclient.MatchingLabelsSelector{Selector: labelSelector}, - } - - err = c.List(ctx, statefulsetPods, opts...) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) - scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} } - - // find the container with the phrase 'splunk' in it - for _, v := range statefulsetPods.Items { - for _, container := range v.Status.ContainerStatuses { - if strings.Contains(container.Name, "splunk") { - image := container.Image - return image, nil - } - + if _, ok := annotations["splunk/image-tag"]; ok { + if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { + return false, nil } + } else { + return false, nil } - return "", nil + return true, nil } -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// changeClusterManagerAnnotations updates the checkUpdateImage field of the clusterManager annotations to trigger the reconcile loop // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { @@ -581,24 +520,10 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller return nil } - image, _ := getLicenseManagerCurrentImage(ctx, c, cr) - - // fetch and check the annotation fields of the ClusterManager - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == image { - return nil - } - } + image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = image + err = changeAnnotations(ctx, c, image, clusterManagerInstance) - 
clusterManagerInstance.SetAnnotations(annotations) - err = c.Update(ctx, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 99a524e82..86c1507a2 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -83,7 +83,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -500,7 +499,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, @@ -523,7 +521,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: 
"*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -1391,7 +1388,7 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } -func TestUpgradeScenario(t *testing.T) { +func TestIsClusterManagerReadyForUpgrade(t *testing.T) { ctx := context.TODO() @@ -1412,6 +1409,9 @@ func TestUpgradeScenario(t *testing.T) { Image: "splunk/splunk:latest", }, Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, }, }, } @@ -1428,63 +1428,6 @@ func TestUpgradeScenario(t *testing.T) { debug.PrintStack() } - // get StatefulSet labels - - namespacedName := types.NamespacedName{ - Namespace: lm.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, lm.GetName()), - } - lmstatefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, lmstatefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := lmstatefulSet.Spec.Template.ObjectMeta.Labels - - // create LM pod - lmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - lmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update pod - lmstpod.Status.Phase = corev1.PodRunning - lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - // Create Cluster Manager cm := 
enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ @@ -1505,10 +1448,7 @@ func TestUpgradeScenario(t *testing.T) { }, } replicas := int32(1) - labels = map[string]string{ - "app": "test", - "tier": "splunk", - } + cmstatefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager", @@ -1535,9 +1475,6 @@ func TestUpgradeScenario(t *testing.T) { Replicas: &replicas, }, } - cmstatefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } err = client.Create(ctx, &cm) err = client.Create(ctx, cmstatefulset) @@ -1546,54 +1483,21 @@ func TestUpgradeScenario(t *testing.T) { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - // Create CM pod - cmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - cmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, cmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } + cm.Spec.Image = "splunk2" + lm.Spec.Image = "splunk2" + _, err = ApplyLicenseManager(ctx, client, &lm) - // update CM pod - cmstpod.Status.Phase = corev1.PodRunning - cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, } - err = client.Status().Update(ctx, cmstpod) + err = client.Get(ctx, namespacedName, clusterManager) if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) } - cm.Spec.Image = "splunk2" - 
lmstpod.Status.ContainerStatuses[0].Image = "splunk2" - err = client.Status().Update(ctx, lmstpod) - check, err := upgradeScenario(ctx, client, &cm) + check, err := isClusterManagerReadyForUpgrade(ctx, client, clusterManager) if err != nil { t.Errorf("Unexpected upgradeScenario error %v", err) @@ -1605,98 +1509,6 @@ func TestUpgradeScenario(t *testing.T) { } -func TestGetClusterManagerCurrentImage(t *testing.T) { - - ctx := context.TODO() - current := enterpriseApi.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - }, - }, - } - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - err := client.Create(ctx, ¤t) - _, err = ApplyClusterManager(ctx, client, ¤t) - if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) - } - - namespacedName := types.NamespacedName{ - Namespace: current.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, current.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, statefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := statefulSet.Spec.Template.ObjectMeta.Labels - - stpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - stpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, stpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - 
debug.PrintStack() - } - - // update statefulset - stpod.Status.Phase = corev1.PodRunning - stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, stpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - image, err := getClusterManagerCurrentImage(ctx, client, ¤t) - - if err != nil { - t.Errorf("Unexpected getClusterManagerCurrentImage error %v", err) - } - if image != stpod.Status.ContainerStatuses[0].Image { - t.Errorf("getClusterManagerCurrentImage does not return the current pod image") - } -} - func TestChangeClusterManagerAnnotations(t *testing.T) { ctx := context.TODO() @@ -1795,50 +1607,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - // create LM pod - lmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-lm-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - lmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update pod - lmstpod.Status.Phase = corev1.PodRunning - lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - err = changeClusterManagerAnnotations(ctx, client, lm) if err != nil { t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) @@ -1854,7 +1622,7 @@ func 
TestChangeClusterManagerAnnotations(t *testing.T) { } annotations := clusterManager.GetAnnotations() - if annotations["checkUpdateImage"] != lm.Spec.Image { + if annotations["splunk/image-tag"] != lm.Spec.Image { t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 60d8a95a9..828a169d5 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "reflect" - "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -27,10 +26,8 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -230,54 +227,3 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("getLicenseManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - eventPublisher, _ := newK8EventPublisher(c, cr) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err := c.Get(ctx, namespacedName, statefulSet) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. 
Reason %v", err)) - scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) - scopedLog.Error(err, "Unable to get labels") - return "", err - } - - // get a list of all pods in the namespace with matching labels as the statefulset - statefulsetPods := &corev1.PodList{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), - rclient.MatchingLabelsSelector{Selector: labelSelector}, - } - - err = c.List(ctx, statefulsetPods, opts...) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Pod list. Reason %v", err)) - scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - - // find the container with the phrase 'splunk' in it - for _, v := range statefulsetPods.Items { - for _, container := range v.Status.ContainerStatuses { - if strings.Contains(container.Name, "splunk") { - image := container.Image - return image, nil - } - - } - } - - return "", nil -} diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 25ffd6f0b..8c7d597c9 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,98 +720,6 @@ func TestLicenseManagerList(t *testing.T) { } } -func TestGetLicenseManagerCurrentImage(t *testing.T) { - - ctx := context.TODO() - current := enterpriseApi.LicenseManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.LicenseManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: 
[]corev1.Volume{}, - }, - }, - } - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - err := client.Create(ctx, ¤t) - _, err = ApplyLicenseManager(ctx, client, ¤t) - if err != nil { - t.Errorf("applyLicenseManager should not have returned error; err=%v", err) - } - - namespacedName := types.NamespacedName{ - Namespace: current.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, current.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, statefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := statefulSet.Spec.Template.ObjectMeta.Labels - - stpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - stpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, stpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update statefulset - stpod.Status.Phase = corev1.PodRunning - stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, stpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - image, err := getLicenseManagerCurrentImage(ctx, client, ¤t) - - if err != nil { - t.Errorf("Unexpected getLicenseManagerCurrentImage error %v", err) - } - if image != stpod.Status.ContainerStatuses[0].Image { - t.Errorf("getLicenseManagerCurrentImage does not return the current pod image") - } -} - func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} diff --git 
a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a042a0c6f..06a7c95b9 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -360,32 +360,26 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor // on update, and returns error if something is wrong. func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(client, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.MonitoringConsoleRef.Name, } monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} - err := client.Get(context.TODO(), namespacedName, monitoringConsoleInstance) + err := client.Get(ctx, namespacedName, monitoringConsoleInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } - image, _ := getClusterManagerCurrentImage(ctx, client, cr) - annotations := monitoringConsoleInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == image { - return nil - } - } + image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - annotations["checkUpdateImage"] = image + err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) - monitoringConsoleInstance.SetAnnotations(annotations) - err = client.Update(ctx, monitoringConsoleInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) + scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 35c1f3cbf..b180e3271 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2272,3 +2272,50 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) } return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } + +// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, +// returns the image, and error if something goes wrong +func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (string, error) { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(instanceType, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + return "", err + } + + image := statefulSet.Spec.Template.Spec.Containers[0].Image + + return image, nil + +} + +// changeAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong +func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image string, cr splcommon.MetaObject) error { + + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["splunk/image-tag"]; ok { + if annotations["splunk/image-tag"] == image { + return nil + } + } + + // create/update the checkUpdateImage annotation field + annotations["splunk/image-tag"] = image + + cr.SetAnnotations(annotations) + err := c.Update(ctx, cr) + if err != nil { + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/util_test.go 
b/pkg/splunk/enterprise/util_test.go index a15d5913e..64587db8a 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -3149,3 +3149,43 @@ func TestGetLicenseMasterURL(t *testing.T) { t.Errorf("Expected a valid return value") } } +func TestGetCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyClusterManager(ctx, client, ¤t) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + instanceType := SplunkClusterManager + + image, err := getCurrentImage(ctx, client, ¤t, instanceType) + + if err != nil { + t.Errorf("Unexpected getCurrentImage error %v", err) + } + if image != current.Spec.Image { + t.Errorf("getCurrentImage does not return the current statefulset image") + } + +} From a116e9cf7e9cf1a80b78074573c9885b2753c8df Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 14:56:15 -0700 Subject: [PATCH 35/52] Added only warning if annotation not found --- pkg/splunk/enterprise/clustermanager.go | 3 +- pkg/splunk/enterprise/clustermanager_test.go | 67 -------------------- 2 files changed, 1 insertion(+), 69 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 3fb52fab3..ebb7eee36 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -179,6 +179,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return 
result, err } + // check if the ClusterManager is ready for version upgrade, if required continueReconcile, err := isClusterManagerReadyForUpgrade(ctx, client, cr) if err != nil || !continueReconcile { return result, err @@ -495,8 +496,6 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { return false, nil } - } else { - return false, nil } return true, nil diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 86c1507a2..b7f43356b 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1447,37 +1447,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { }, }, } - replicas := int32(1) - - cmstatefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager", - Namespace: "test", - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: "splunk-test-cluster-manager-headless", - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - }, - Replicas: &replicas, - }, - } err = client.Create(ctx, &cm) - err = client.Create(ctx, cmstatefulset) _, err = ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) @@ -1530,43 +1501,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { }, }, } - replicas := int32(1) - labels := map[string]string{ - "app": "test", - "tier": "splunk", - } - lmstatefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-lm-license-manager", - Namespace: "test", - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: 
"splunk-test-lm-license-manager-headless", - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - }, - Replicas: &replicas, - }, - } - lmstatefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } cm := &enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ @@ -1590,7 +1524,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { // Create the instances client.Create(ctx, lm) - client.Create(ctx, lmstatefulset) _, err := ApplyLicenseManager(ctx, client, lm) if err != nil { t.Errorf("applyLicenseManager should not have returned error; err=%v", err) From 3695b41f86aa8659c209e2a5d8a0aab6c88e7dbd Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 14:59:37 -0700 Subject: [PATCH 36/52] Add warning --- pkg/splunk/enterprise/clustermanager.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index ebb7eee36..2589caca1 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -496,6 +496,8 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { return false, nil } + } else { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the annotations. 
Reason %v", err)) } return true, nil From 80e6accb84c787df75454cf9c8336ce735da4703 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 17:19:32 -0700 Subject: [PATCH 37/52] Updated upgradeCondition --- pkg/splunk/enterprise/clustermanager.go | 44 +++++++++----------- pkg/splunk/enterprise/clustermanager_test.go | 11 +---- pkg/splunk/enterprise/monitoringconsole.go | 4 +- pkg/splunk/enterprise/util.go | 11 +++-- 4 files changed, 27 insertions(+), 43 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 2589caca1..59f9c4ae1 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -443,13 +443,19 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// isClusterManagerReadyForUpgrade checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly +// isClusterManagerReadyForUpgrade checks if ClusterManager can be upgraded if a version upgrade is in-progress +// No-operation otherwise; returns bool, err accordingly func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) + // check if a LicenseManager is attached to the instance + licenseManagerRef := cr.Spec.LicenseManagerRef + if licenseManagerRef.Name == "" { + return true, nil + } + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), @@ -462,14 +468,7 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return true, nil } - licenseManagerRef := cr.Spec.LicenseManagerRef - if 
licenseManagerRef.Name == "" { - return true, nil - } - namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} - - // create new object licenseManager := &enterpriseApi.LicenseManager{} // get the license manager referred in cluster manager @@ -480,6 +479,13 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return true, err } + lmImage, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager current image") + return false, err + } + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) if err != nil { eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) @@ -487,26 +493,17 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return false, err } - // check conditions for upgrade - annotations := cr.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["splunk/image-tag"]; ok { - if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { - return false, nil - } - } else { - eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the annotations. 
Reason %v", err)) + // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || lmImage != cr.Spec.Image) { + return false, nil } return true, nil } -// changeClusterManagerAnnotations updates the checkUpdateImage field of the clusterManager annotations to trigger the reconcile loop +// changeClusterManagerAnnotations updates the splunk/image-tag field of the ClusterManager annotations to trigger the reconcile loop // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) @@ -520,9 +517,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller if err != nil && k8serrors.IsNotFound(err) { return nil } - image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err = changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { @@ -532,5 +527,4 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } return nil - } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index b7f43356b..fcec6a19a 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -67,7 +67,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, 
{MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -84,7 +83,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -499,7 +497,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -522,7 +519,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -1389,7 +1385,6 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { } func TestIsClusterManagerReadyForUpgrade(t *testing.T) { - ctx := context.TODO() builder := fake.NewClientBuilder() @@ -1424,7 +1419,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { lm.Status.Phase = enterpriseApi.PhaseReady err = client.Status().Update(ctx, &lm) if err != nil { - t.Errorf("Unexpected update pod %v", err) + t.Errorf("Unexpected status update %v", err) debug.PrintStack() } @@ -1475,9 +1470,8 @@ func TestIsClusterManagerReadyForUpgrade(t 
*testing.T) { } if !check { - t.Errorf("upgradeScenario: CM should be ready for upgrade") + t.Errorf("isClusterManagerReadyForUpgrade: CM should be ready for upgrade") } - } func TestChangeClusterManagerAnnotations(t *testing.T) { @@ -1558,7 +1552,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { if annotations["splunk/image-tag"] != lm.Spec.Image { t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") } - } func TestClusterManagerWitReadyState(t *testing.T) { diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 06a7c95b9..d8197bf04 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -356,10 +356,9 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } -// changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop +// changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(client, cr) @@ -374,7 +373,6 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co return nil } image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index b180e3271..ff40a7af7 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2273,10 +2273,8 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } -// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, -// returns the image, and error if something goes wrong +// getCurrentImage gets the image of the statefulset, returns the image, and error if something goes wrong func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (string, error) { - namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(instanceType, cr.GetName()), @@ -2287,16 +2285,17 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco return "", err } + if statefulSet.Spec.Template.Spec.Containers == nil { + return "", nil + } image := statefulSet.Spec.Template.Spec.Containers[0].Image return image, nil } -// changeAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is 
wrong +// changeAnnotations updates the splunk/image-tag field to trigger the reconcile loop, and returns error if something is wrong func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image string, cr splcommon.MetaObject) error { - annotations := cr.GetAnnotations() if annotations == nil { annotations = map[string]string{} From 3db7d4c6364037ad27d545744ef323fb2e1feb46 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:10:19 -0700 Subject: [PATCH 38/52] updated changeAnnotation to work with no ref --- pkg/splunk/enterprise/clustermanager.go | 43 ++++++++++++++++---- pkg/splunk/enterprise/clustermanager_test.go | 6 +-- pkg/splunk/enterprise/monitoringconsole.go | 43 ++++++++++++++++---- pkg/splunk/enterprise/util.go | 8 ++-- 4 files changed, 77 insertions(+), 23 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 59f9c4ae1..53cb8e83c 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -23,6 +23,7 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" @@ -508,17 +509,43 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if len(cr.Spec.ClusterManagerRef.Name) > 0 { + // if the LicenseManager holds the ClusterManagerRef 
+ namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + err := c.Get(ctx, namespacedName, clusterManagerInstance) + if err != nil { + return err + } + } else { + // List out all the ClusterManager instances in the namespace + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + objectList := enterpriseApi.ClusterManagerList{} + err := c.List(ctx, &objectList, opts...) + if err != nil { + return err + } + + // check with instance has the required LicenseManagerRef + for _, cm := range objectList.Items { + if cm.Spec.LicenseManagerRef.Name == cr.GetName() { + clusterManagerInstance = &cm + break + } + } + + if len(clusterManagerInstance.GetName()) == 0 { + return nil + } } + image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err = changeAnnotations(ctx, c, image, clusterManagerInstance) + err := changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index fcec6a19a..63314c870 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1489,9 +1489,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test-cm", - }, }, }, } @@ -1507,6 +1504,9 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, + LicenseManagerRef: corev1.ObjectReference{ + Name: "test-lm", + }, }, }, } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index d8197bf04..ffd82fc3c 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -34,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -363,17 +364,43 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(client, cr) - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.MonitoringConsoleRef.Name, - } monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} - err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if len(cr.Spec.MonitoringConsoleRef.Name) > 0 { + // if the ClusterManager holds the MonitoringConsoleRef + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), 
+ Name: cr.Spec.MonitoringConsoleRef.Name, + } + err := client.Get(ctx, namespacedName, monitoringConsoleInstance) + if err != nil { + return err + } + } else { + // List out all the MonitoringConsole instances in the namespace + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + objectList := enterpriseApi.MonitoringConsoleList{} + err := client.List(ctx, &objectList, opts...) + if err != nil { + return err + } + + // check with instance has the required ClusterManagerRef + for _, mc := range objectList.Items { + if mc.Spec.ClusterManagerRef.Name == cr.GetName() { + monitoringConsoleInstance = &mc + break + } + } + + if len(monitoringConsoleInstance.GetName()) == 0 { + return nil + } } + image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) + err := changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index ff40a7af7..ceba58ace 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2285,12 +2285,12 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco return "", err } - if statefulSet.Spec.Template.Spec.Containers == nil { - return "", nil + if len(statefulSet.Spec.Template.Spec.Containers) > 0 { + return statefulSet.Spec.Template.Spec.Containers[0].Image, nil } - image := statefulSet.Spec.Template.Spec.Containers[0].Image + err = fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) - return image, nil + return "", err } From 1c1531a905592faa53f7902eaebd1bba8d6fd9ee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:41:47 -0700 Subject: [PATCH 39/52] Fixed unit tests --- pkg/splunk/enterprise/clustermanager.go | 4 ++-- pkg/splunk/enterprise/licensemanager_test.go | 15 +++++++++------ pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 53cb8e83c..392c45f05 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -527,8 +527,8 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) 
- if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } // check with instance has the required LicenseManagerRef diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 8c7d597c9..4199ae504 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -57,7 +57,6 @@ func TestApplyLicenseManager(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-stack1-license-manager-secret-v1"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, - {MetaName: "*v4.ClusterManager-test-"}, {MetaName: "*v4.LicenseManager-test-stack1"}, {MetaName: "*v4.LicenseManager-test-stack1"}, } @@ -70,12 +69,16 @@ func TestApplyLicenseManager(t *testing.T) { client.InNamespace("test"), client.MatchingLabels(labels), } + listOpts1 := []client.ListOption{ + client.InNamespace("test"), + } listmockCall := []spltest.MockFuncCall{ - {ListOpts: listOpts}} - - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12], funcCalls[13]} - updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} + {ListOpts: listOpts}, + {ListOpts: listOpts1}, + } + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[1]}} + updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], 
funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} + updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0], listmockCall[1]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseManager", diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index ffd82fc3c..7ac5f622e 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -382,8 +382,8 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) - if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } // check with instance has the required ClusterManagerRef From f9c171f0873b8287c2c77eead77d1c66474c0b0b Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:52:43 -0700 Subject: [PATCH 40/52] Handled not found error --- pkg/splunk/enterprise/clustermanager.go | 4 ++-- pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 392c45f05..748d608a4 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -517,8 +517,8 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller Name: cr.Spec.ClusterManagerRef.Name, } err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } } else { // List out all the ClusterManager instances in the namespace diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 7ac5f622e..7482afc56 100644 --- 
a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -372,8 +372,8 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co Name: cr.Spec.MonitoringConsoleRef.Name, } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } } else { // List out all the MonitoringConsole instances in the namespace From 6c6b99527b0a1b8af8b535d01362d507d2c630cd Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 10:48:48 -0700 Subject: [PATCH 41/52] Added MC functions --- pkg/splunk/enterprise/clustermanager.go | 5 + pkg/splunk/enterprise/monitoringconsole.go | 64 +++++++ .../enterprise/monitoringconsole_test.go | 170 ++++++++++++++++++ 3 files changed, 239 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 748d608a4..962ba8d7b 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -232,6 +232,11 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + + err = changeMonitoringConsoleAnnotations(ctx, client, cr) + if err != nil { + return result, err + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 7482afc56..344864b22 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -137,6 +137,12 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie return result, err } + // check if the Monitoring Console is ready for version upgrade, if required + continueReconcile, err := isMonitoringConsoleReadyForUpgrade(ctx, client, cr) + if err != nil || !continueReconcile { + return result, err + } + mgr := splctrl.DefaultStatefulSetPodManager{} phase, err := mgr.Update(ctx, client, statefulSet, 1) if err != nil { @@ -357,6 +363,64 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } +// isMonitoringConsoleReadyForUpgrade checks if MonitoringConsole can be upgraded if a version upgrade is in-progress +// No-operation otherwise; returns bool, err accordingly +func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.MonitoringConsole) (bool, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("isMonitoringConsoleReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + + // check if a ClusterManager is attached to the instance + clusterManagerRef := cr.Spec.ClusterManagerRef + if clusterManagerRef.Name == "" { + return true, nil + } + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetName()), + } + + // check if the stateful set is created at this instance + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil && k8serrors.IsNotFound(err) { + return true, nil + } + + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} + 
clusterManager := &enterpriseApi.ClusterManager{} + + // get the cluster manager referred in the monitoring console + err = c.Get(ctx, namespacedName, clusterManager) + if err != nil { + eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not find the Cluster Manager. Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager") + return true, err + } + + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) + if err != nil { + eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") + return false, err + } + + mcImage, err := getCurrentImage(ctx, c, cr, SplunkMonitoringConsole) + if err != nil { + eventPublisher.Warning(ctx, "isMonitoringConsolerReadyForUpgrade", fmt.Sprintf("Could not get the Monitoring Console Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get monitoring console current image") + return false, err + } + + // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + if (cr.Spec.Image != mcImage) && (clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != cr.Spec.Image) { + return false, nil + } + + return true, nil +} + // changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index 72efd15a7..e72750ec1 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -1100,3 +1100,173 @@ func TestGetMonitoringConsoleList(t *testing.T) { t.Errorf("Got wrong number of IndexerCluster objects. Expected=%d, Got=%d", 1, numOfObjects) } } + +func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create Cluster Manager + cm := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err := client.Create(ctx, &cm) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &cm) + if err != nil { + t.Errorf("Unexpected status update %v", err) + debug.PrintStack() + } + + // Create Monitoring Console + mc := enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: 
corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err = client.Create(ctx, &mc) + _, err = ApplyMonitoringConsole(ctx, client, &mc) + if err != nil { + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) + } + + mc.Spec.Image = "splunk2" + cm.Spec.Image = "splunk2" + _, err = ApplyClusterManager(ctx, client, &cm) + + monitoringConsole := &enterpriseApi.MonitoringConsole{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, monitoringConsole) + if err != nil { + t.Errorf("isMonitoringConsoleReadyForUpgrade should not have returned error=%v", err) + } + + check, err := isMonitoringConsoleReadyForUpgrade(ctx, client, monitoringConsole) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("isMonitoringConsoleReadyForUpgrade: MC should be ready for upgrade") + } +} + +func TestChangeMonitoringConsoleAnnotations(t *testing.T) { + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // define CM and MC + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + + mc := &enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + cm.Spec.Image = "splunk/splunk:latest" + + // Create the instances + client.Create(ctx, cm) + _, err := 
ApplyClusterManager(ctx, client, cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, cm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + client.Create(ctx, mc) + _, err = ApplyMonitoringConsole(ctx, client, mc) + if err != nil { + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) + } + + err = changeMonitoringConsoleAnnotations(ctx, client, cm) + if err != nil { + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) + } + monitoringConsole := &enterpriseApi.MonitoringConsole{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, monitoringConsole) + if err != nil { + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) + } + + annotations := monitoringConsole.GetAnnotations() + if annotations["splunk/image-tag"] != cm.Spec.Image { + t.Errorf("changeMonitoringConsoleAnnotations should have set the checkUpdateImage annotation field to the current image") + } +} From e2e443383c03f4f300a7727d6b72532c37ca771e Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 11:10:41 -0700 Subject: [PATCH 42/52] Removed blank lines; handled errors in changeAnnotation --- pkg/splunk/enterprise/clustermanager.go | 26 ++++++++++++++------- pkg/splunk/enterprise/monitoringconsole.go | 27 ++++++++++++++-------- pkg/splunk/enterprise/util.go | 6 +---- 3 files changed, 37 insertions(+), 22 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 748d608a4..5620ddc2e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -517,8 +517,11 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller Name: 
cr.Spec.ClusterManagerRef.Name, } err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } } else { // List out all the ClusterManager instances in the namespace @@ -527,11 +530,14 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } - // check with instance has the required LicenseManagerRef + // check if instance has the required LicenseManagerRef for _, cm := range objectList.Items { if cm.Spec.LicenseManagerRef.Name == cr.GetName() { clusterManagerInstance = &cm @@ -544,9 +550,13 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } } - image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err := changeAnnotations(ctx, c, image, clusterManagerInstance) - + image, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not get the LicenseManager Image. Reason %v", err)) + scopedLog.Error(err, "Get LicenseManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 7482afc56..375832a4e 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -372,8 +372,11 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co Name: cr.Spec.MonitoringConsoleRef.Name, } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } } else { // List out all the MonitoringConsole instances in the namespace @@ -382,11 +385,14 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } - // check with instance has the required ClusterManagerRef + // check if instance has the required ClusterManagerRef for _, mc := range objectList.Items { if mc.Spec.ClusterManagerRef.Name == cr.GetName() { monitoringConsoleInstance = &mc @@ -399,9 +405,13 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } } - image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err := changeAnnotations(ctx, client, image, monitoringConsoleInstance) - + image, err := getCurrentImage(ctx, client, cr, SplunkClusterManager) + if err != nil { + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not get the ClusterManager Image. 
Reason %v", err)) + scopedLog.Error(err, "Get ClusterManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) @@ -409,5 +419,4 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } return nil - } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index ceba58ace..2876565a5 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2288,10 +2288,7 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco if len(statefulSet.Spec.Template.Spec.Containers) > 0 { return statefulSet.Spec.Template.Spec.Containers[0].Image, nil } - err = fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) - - return "", err - + return "", fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) } // changeAnnotations updates the splunk/image-tag field to trigger the reconcile loop, and returns error if something is wrong @@ -2316,5 +2313,4 @@ func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image } return nil - } From fe1d66f0a451c61120925d4398d4b349c72089d6 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 11:20:31 -0700 Subject: [PATCH 43/52] Only call changeAnnotation if LM is ready --- pkg/splunk/enterprise/licensemanager.go | 10 ++++++---- pkg/splunk/enterprise/licensemanager_test.go | 8 ++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 828a169d5..ad572de10 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ 
b/pkg/splunk/enterprise/licensemanager.go @@ -172,6 +172,12 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + + // trigger ClusterManager reconcile by changing the splunk/image-tag annotation + err = changeClusterManagerAnnotations(ctx, client, cr) + if err != nil { + return result, err + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. @@ -179,10 +185,6 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, result.RequeueAfter = 0 } - err = changeClusterManagerAnnotations(ctx, client, cr) - if err != nil { - return result, err - } return result, nil } diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 4199ae504..2979fcd1b 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -69,16 +69,12 @@ func TestApplyLicenseManager(t *testing.T) { client.InNamespace("test"), client.MatchingLabels(labels), } - listOpts1 := []client.ListOption{ - client.InNamespace("test"), - } listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}, - {ListOpts: listOpts1}, } - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[1]}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], 
funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} - updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0], listmockCall[1]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseManager", From 451588009e432909a2792d928e9edd1d054e5a91 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 14:35:00 -0700 Subject: [PATCH 44/52] Removed redundant checks --- pkg/splunk/enterprise/clustermanager.go | 9 ++++++--- pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5620ddc2e..167ef3f52 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -475,9 +475,12 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller // get the license manager referred in cluster manager err = c.Get(ctx, namespacedName, licenseManager) if err != nil { + if k8serrors.IsNotFound(err) { + return true, nil + } eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the License Manager. 
Reason %v", err)) scopedLog.Error(err, "Unable to get licenseManager") - return true, err + return false, err } lmImage, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) @@ -518,7 +521,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } err := c.Get(ctx, namespacedName, clusterManagerInstance) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return nil } return err @@ -531,7 +534,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if err.Error() == "NotFound" { return nil } return err diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 375832a4e..9b9b1f534 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -373,7 +373,7 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return nil } return err @@ -386,7 +386,7 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) 
if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if err.Error() == "NotFound" { return nil } return err From b2d7bc18921ce9aff5669af28fa037439d6ed736 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 15:36:09 -0700 Subject: [PATCH 45/52] Return if CM list is empty --- pkg/splunk/enterprise/clustermanager.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 167ef3f52..e25c13ea7 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -540,6 +540,10 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller return err } + if len(objectList.Items) == 0 { + return nil + } + // check if instance has the required LicenseManagerRef for _, cm := range objectList.Items { if cm.Spec.LicenseManagerRef.Name == cr.GetName() { From 0d178a1e4719713d27ef827fdd54f8c0daaba5f4 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 15:49:51 -0700 Subject: [PATCH 46/52] removed superfluous nil err check --- pkg/splunk/enterprise/util.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 2876565a5..a42fe6e9b 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2308,9 +2308,5 @@ func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image cr.SetAnnotations(annotations) err := c.Update(ctx, cr) - if err != nil { - return err - } - - return nil + return err } From 77f9a749fa3989f55f17c6217354f8ca83bf1631 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 20:47:26 -0700 Subject: [PATCH 47/52] Removed branch from workflow --- .github/workflows/helm-test-workflow.yml | 1 - .github/workflows/int-test-workflow.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/helm-test-workflow.yml 
b/.github/workflows/helm-test-workflow.yml index d2e9b7aff..e68dc44d7 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,7 +2,6 @@ name: Helm Test WorkFlow on: push: branches: - - CSPL-2094-LM-upgrade-strategy - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 25a85105a..3dd4eed22 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,7 +2,6 @@ name: Integration Test WorkFlow on: push: branches: - - CSPL-2094-LM-upgrade-strategy - develop - main - feature** From 130c778cae132d3d4c0c2eaa543890eb870f36b3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 14 Jul 2023 10:48:16 -0700 Subject: [PATCH 48/52] Added branch to workflow --- .github/workflows/helm-test-workflow.yml | 1 + .github/workflows/int-test-workflow.yml | 1 + pkg/splunk/enterprise/clustermanager.go | 2 +- pkg/splunk/enterprise/monitoringconsole.go | 3 +++ 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index e68dc44d7..0b7ed9275 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - cspl-2343 - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3dd4eed22..d132bafdb 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - cspl-2343 - develop - main - feature** diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5a9e11fb3..dba7955b7 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -233,6 +233,7 @@ func ApplyClusterManager(ctx context.Context, 
client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + // trigger MonitoringConsole reconcile by changing the splunk/image-tag annotation err = changeMonitoringConsoleAnnotations(ctx, client, cr) if err != nil { return result, err @@ -544,7 +545,6 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } return err } - if len(objectList.Items) == 0 { return nil } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 0d0d9beef..0c12c7ab9 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -455,6 +455,9 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } return err } + if len(objectList.Items) == 0 { + return nil + } // check if instance has the required ClusterManagerRef for _, mc := range objectList.Items { From 077f13080dec46b6fefabc2b5f78569a3f71011a Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 09:06:49 -0700 Subject: [PATCH 49/52] Fixed comment --- pkg/splunk/enterprise/monitoringconsole.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 0c12c7ab9..5ba8ff25b 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -391,7 +391,7 @@ func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.Control namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} clusterManager := &enterpriseApi.ClusterManager{} - // get the cluster manager referred in cluster manager + // get the cluster manager referred in monitoring console err = c.Get(ctx, namespacedName, clusterManager) if err != nil { eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", 
fmt.Sprintf("Could not find the Cluster Manager. Reason %v", err)) From 52914ddcc72fac2833d2eb3df43718e6441d31b3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 09:20:44 -0700 Subject: [PATCH 50/52] Fixed unit test --- pkg/splunk/enterprise/clustermanager_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 63314c870..01be64670 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -531,9 +531,14 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { runtime.InNamespace("test"), runtime.MatchingLabels(labels), } + listOpts1 := []runtime.ListOption{ + runtime.InNamespace("test"), + } listmockCall := []spltest.MockFuncCall{ - {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} + {ListOpts: listOpts}, + {ListOpts: listOpts1}, + } + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0], listmockCall[1]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[8]}, "List": {listmockCall[0]}} current := enterpriseApi.ClusterManager{ From 26efb68b0359a2a2e198111b91cfe474441e8c95 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 20 Jul 2023 13:13:08 -0700 Subject: [PATCH 51/52] Improved comment for the upgrade condition --- pkg/splunk/enterprise/clustermanager.go | 3 ++- pkg/splunk/enterprise/monitoringconsole.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 
dba7955b7..fad4c23b8 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -503,7 +503,8 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return false, err } - // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + // check if an image upgrade is happening and whether LM has finished updating yet, return false to stop + // further reconcile operations on CM until LM is ready if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || lmImage != cr.Spec.Image) { return false, nil } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 5ba8ff25b..50c16891a 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -413,7 +413,8 @@ func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.Control return false, err } - // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + // check if an image upgrade is happening and whether CM has finished updating yet, return false to stop + // further reconcile operations on MC until CM is ready if (cr.Spec.Image != mcImage) && (clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != cr.Spec.Image) { return false, nil } From 5a0dc922851c46fbaf8363d34cf7f7f632806d97 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 21 Jul 2023 10:28:32 -0700 Subject: [PATCH 52/52] Removed branch from workflow --- .github/workflows/helm-test-workflow.yml | 1 - .github/workflows/int-test-workflow.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 0b7ed9275..e68dc44d7 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,7 +2,6 @@ name: Helm Test WorkFlow on: push: branches: 
- - cspl-2343 - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index d132bafdb..3dd4eed22 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,7 +2,6 @@ name: Integration Test WorkFlow on: push: branches: - - cspl-2343 - develop - main - feature**