diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml
new file mode 100644
index 000000000..d8ecc7336
--- /dev/null
+++ b/kuttl/kuttl-test-helm-upgrade.yaml
@@ -0,0 +1,10 @@
+# Entrypoint for helm automation
+apiVersion: kuttl.dev/v1beta1
+kind: TestSuite
+testDirs:
+- ./kuttl/tests/upgrade
+parallel: 3
+timeout: 5000
+startKIND: false
+artifactsDir: kuttl-artifacts
+kindNodeCache: false
diff --git a/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml
new file mode 100644
index 000000000..142b71272
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml
@@ -0,0 +1,9 @@
+---
+# assert for splunk operator deployment to be ready
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: splunk-operator-controller-manager
+status:
+  readyReplicas: 1
+  availableReplicas: 1
diff --git a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml
new file mode 100644
index 000000000..a10c31557
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} --namespace ${NAMESPACE}
+    namespaced: true
\ No newline at end of file
diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml
new file mode 100644
index 000000000..4b09ebf54
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml
@@ -0,0 +1,9 @@
+---
+# assert for splunk operator deployment to be ready
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: splunk-operator-controller-manager
+status:
+  readyReplicas: 1
+  availableReplicas: 1
\ No newline at end of file
diff --git a/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml
new file mode 100644
index 000000000..f5689caa5
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml
@@ -0,0 +1,6 @@
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} --namespace ${NAMESPACE}
+    namespaced: true
\ No newline at end of file
diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml
new file mode 100644
index 000000000..59008dd62
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml
@@ -0,0 +1,24 @@
+---
+# assert for cluster manager custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: ClusterManager
+metadata:
+  name: cm
+status:
+  phase: Ready
+
+---
+# check if stateful set is created
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-cm-cluster-manager
+status:
+  replicas: 1
+
+---
+# check if secret object is created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-cm-cluster-manager-secret-v1
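Note: kuttl runs the numbered steps in order and retries each NN-assert.yaml against live cluster state until it matches or the suite timeout (timeout: 5000 seconds above) expires. Assuming the kuttl kubectl plugin is installed and HELM_REPO_PATH, KUTTL_SPLUNK_OPERATOR_IMAGE, KUTTL_SPLUNK_ENTERPRISE_IMAGE, and KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE are exported, the suite can be run with `kubectl kuttl test --config kuttl/kuttl-test-helm-upgrade.yaml`.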
diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml
new file mode 100644
index 000000000..c3c560798
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml
@@ -0,0 +1,40 @@
+---
+# assert for SearchHeadCluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: SearchHeadCluster
+metadata:
+  name: shc
+status:
+  phase: Ready
+
+---
+# check if secret object is created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-shc-deployer-secret-v1
+
+---
+# check if secret object is created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-shc-search-head-secret-v1
+
+---
+# check for stateful set and replicas as configured
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-shc-search-head
+status:
+  replicas: 3
+
+---
+# check for stateful set
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-shc-deployer
+status:
+  replicas: 1
\ No newline at end of file
diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml
new file mode 100644
index 000000000..368902426
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml
@@ -0,0 +1,24 @@
+---
+# assert for indexer cluster custom resource to be ready
+apiVersion: enterprise.splunk.com/v4
+kind: IndexerCluster
+metadata:
+  name: idxc
+status:
+  phase: Ready
+
+---
+# check for stateful set and replicas as configured
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: splunk-idxc-indexer
+status:
+  replicas: 3
+
+---
+# check if secret object is created
+apiVersion: v1
+kind: Secret
+metadata:
+  name: splunk-idxc-indexer-secret-v1
diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml
new file mode 100644
index 000000000..cf9d19cf8
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: helm uninstall splunk-c3 --namespace ${NAMESPACE}
+    namespaced: true
diff --git a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml
new file mode 100644
index 000000000..fd00ad06d
--- /dev/null
+++ b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml
@@ -0,0 +1,50 @@
+splunk-operator:
+  enabled: true
+  splunkOperator:
+    clusterWideAccess: false
+
+sva:
+  c3:
+    enabled: true
+
+    clusterManager:
+      name: cm
+
+    indexerClusters:
+      - name: idxc
+
+    searchHeadClusters:
+      - name: shc
+
+
+indexerCluster:
+  enabled: true
+
+  additionalLabels:
+    label: "true"
+
+  additionalAnnotations:
+    annotation: "true"
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+
+  serviceTemplate:
+    spec:
+      type: LoadBalancer
+
+clusterManager:
+  enabled: true
+
+  additionalLabels:
+    label: "true"
+
+  additionalAnnotations:
+    annotation: "true"
+
+searchHeadCluster:
+  enabled: true
+
+  additionalLabels:
+    label: "true"
+
+  additionalAnnotations:
+    annotation: "true"
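Note: c3_config.yaml is the values file behind the install and upgrade steps above. sva.c3 wires the ClusterManager (cm), IndexerCluster (idxc), and SearchHeadCluster (shc) into a single C3 topology, and the per-CR sections layer on labels/annotations plus a LoadBalancer service for the indexers. After install, the propagated metadata can be spot-checked with, for example, `kubectl get clustermanager cm -n $NAMESPACE -o jsonpath='{.metadata.labels}'`.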
corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -177,6 +180,12 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } + // check if the ClusterManager is ready for version upgrade, if required + continueReconcile, err := isClusterManagerReadyForUpgrade(ctx, client, cr) + if err != nil || !continueReconcile { + return result, err + } + clusterManagerManager := splctrl.DefaultStatefulSetPodManager{} phase, err := clusterManagerManager.Update(ctx, client, statefulSet, 1) if err != nil { @@ -434,3 +443,132 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, } return extraEnv, err } + +// isClusterManagerReadyForUpgrade checks if ClusterManager can be upgraded if a version upgrade is in-progress +// No-operation otherwise; returns bool, err accordingly +func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + + // check if a LicenseManager is attached to the instance + licenseManagerRef := cr.Spec.LicenseManagerRef + if licenseManagerRef.Name == "" { + return true, nil + } + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + + // check if the stateful set is created at this instance + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil && k8serrors.IsNotFound(err) { + return true, nil + } + + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} + licenseManager := &enterpriseApi.LicenseManager{} + + // get the license manager referred in cluster manager + err = c.Get(ctx, namespacedName, licenseManager) + if err != nil { + if k8serrors.IsNotFound(err) { + return true, nil + } + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the License Manager. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager") + return false, err + } + + lmImage, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager current image") + return false, err + } + + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) + if err != nil { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. 
Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") + return false, err + } + + // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || lmImage != cr.Spec.Image) { + return false, nil + } + + return true, nil +} + +// changeClusterManagerAnnotations updates the splunk/image-tag field of the ClusterManager annotations to trigger the reconcile loop +// on update, and returns error if something is wrong +func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + + clusterManagerInstance := &enterpriseApi.ClusterManager{} + if len(cr.Spec.ClusterManagerRef.Name) > 0 { + // if the LicenseManager holds the ClusterManagerRef + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + err := c.Get(ctx, namespacedName, clusterManagerInstance) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return err + } + } else { + // List out all the ClusterManager instances in the namespace + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + objectList := enterpriseApi.ClusterManagerList{} + err := c.List(ctx, &objectList, opts...) + if err != nil { + if err.Error() == "NotFound" { + return nil + } + return err + } + + if len(objectList.Items) == 0 { + return nil + } + + // check if instance has the required LicenseManagerRef + for _, cm := range objectList.Items { + if cm.Spec.LicenseManagerRef.Name == cr.GetName() { + clusterManagerInstance = &cm + break + } + } + + if len(clusterManagerInstance.GetName()) == 0 { + return nil + } + } + + image, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not get the LicenseManager Image. Reason %v", err)) + scopedLog.Error(err, "Get LicenseManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, c, image, clusterManagerInstance) + if err != nil { + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) + scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) + return err + } + + return nil +} diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 190e23f75..63314c870 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1384,6 +1384,176 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } +func TestIsClusterManagerReadyForUpgrade(t *testing.T) { + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create License Manager + lm := enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err := client.Create(ctx, &lm) + _, err = ApplyLicenseManager(ctx, client, &lm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + lm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &lm) + if err != nil { + t.Errorf("Unexpected status update %v", err) + debug.PrintStack() + } + + // Create Cluster Manager + cm := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + LicenseManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err = client.Create(ctx, &cm) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + cm.Spec.Image = "splunk2" + lm.Spec.Image = "splunk2" + _, err = ApplyLicenseManager(ctx, client, &lm) + + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, clusterManager) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + check, err := isClusterManagerReadyForUpgrade(ctx, client, clusterManager) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("isClusterManagerReadyForUpgrade: CM should be ready for upgrade") + } +} + +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + + // define LM and CM + lm := &enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lm", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: 
+
+func TestChangeClusterManagerAnnotations(t *testing.T) {
+	ctx := context.TODO()
+
+	// define LM and CM
+	lm := &enterpriseApi.LicenseManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-lm",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.LicenseManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+				},
+				Volumes: []corev1.Volume{},
+			},
+		},
+	}
+
+	cm := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-cm",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ClusterManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+				},
+				Volumes: []corev1.Volume{},
+				LicenseManagerRef: corev1.ObjectReference{
+					Name: "test-lm",
+				},
+			},
+		},
+	}
+	lm.Spec.Image = "splunk/splunk:latest"
+
+	builder := fake.NewClientBuilder()
+	client := builder.Build()
+	utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
+
+	// Create the instances
+	client.Create(ctx, lm)
+	_, err := ApplyLicenseManager(ctx, client, lm)
+	if err != nil {
+		t.Errorf("applyLicenseManager should not have returned error; err=%v", err)
+	}
+	lm.Status.Phase = enterpriseApi.PhaseReady
+	err = client.Status().Update(ctx, lm)
+	if err != nil {
+		t.Errorf("Unexpected status update %v", err)
+		debug.PrintStack()
+	}
+	client.Create(ctx, cm)
+	_, err = ApplyClusterManager(ctx, client, cm)
+	if err != nil {
+		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
+	}
+
+	err = changeClusterManagerAnnotations(ctx, client, lm)
+	if err != nil {
+		t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err)
+	}
+	clusterManager := &enterpriseApi.ClusterManager{}
+	namespacedName := types.NamespacedName{
+		Name:      cm.Name,
+		Namespace: cm.Namespace,
+	}
+	err = client.Get(ctx, namespacedName, clusterManager)
+	if err != nil {
+		t.Errorf("get ClusterManager should not have returned error=%v", err)
+	}
+
+	annotations := clusterManager.GetAnnotations()
+	if annotations["splunk/image-tag"] != lm.Spec.Image {
+		t.Errorf("changeClusterManagerAnnotations should have set the splunk/image-tag annotation to the current image")
+	}
+}
+
 func TestClusterManagerWitReadyState(t *testing.T) {
 	// create directory for app framework
 	newpath := filepath.Join("/tmp", "appframework")
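Note: the licensemanager.go change below is what sets this chain in motion. At the end of a successful ApplyLicenseManager reconcile, the LicenseManager stamps the downstream ClusterManager's splunk/image-tag annotation via changeClusterManagerAnnotations, which re-queues the ClusterManager; its next reconcile then passes the isClusterManagerReadyForUpgrade gate once the LicenseManager is Ready on the new image.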
diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go
index 83a1048dd..ad572de10 100644
--- a/pkg/splunk/enterprise/licensemanager.go
+++ b/pkg/splunk/enterprise/licensemanager.go
@@ -172,12 +172,19 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient,
 		finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig)
 		result = *finalResult
+
+		// trigger ClusterManager reconcile by changing the splunk/image-tag annotation
+		err = changeClusterManagerAnnotations(ctx, client, cr)
+		if err != nil {
+			return result, err
+		}
 	}
 	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
 	// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
 	if !result.Requeue {
 		result.RequeueAfter = 0
 	}
+
 	return result, nil
 }
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go
index dbdaf153c..2979fcd1b 100644
--- a/pkg/splunk/enterprise/licensemanager_test.go
+++ b/pkg/splunk/enterprise/licensemanager_test.go
@@ -70,8 +70,8 @@ func TestApplyLicenseManager(t *testing.T) {
 		client.MatchingLabels(labels),
 	}
 	listmockCall := []spltest.MockFuncCall{
-		{ListOpts: listOpts}}
-
+		{ListOpts: listOpts},
+	}
 	createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}}
 	updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]}
 	updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}}
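Note: changeMonitoringConsoleAnnotations below mirrors changeClusterManagerAnnotations one hop further down the chain: it resolves the MonitoringConsole either through the ClusterManager's monitoringConsoleRef or by listing the namespace for an instance whose clusterManagerRef points back at this ClusterManager, then stamps it with the ClusterManager's current image (its call site sits outside the lines shown here).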
Reason %v", err)) + scopedLog.Error(err, "Get ClusterManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) + if err != nil { + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) + scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) + return err + } + + return nil +} diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 35c1f3cbf..a42fe6e9b 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2272,3 +2272,41 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) } return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } + +// getCurrentImage gets the image of the statefulset, returns the image, and error if something goes wrong +func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (string, error) { + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(instanceType, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + return "", err + } + + if len(statefulSet.Spec.Template.Spec.Containers) > 0 { + return statefulSet.Spec.Template.Spec.Containers[0].Image, nil + } + return "", fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) +} + +// changeAnnotations updates the splunk/image-tag field to trigger the reconcile loop, and returns error if something is wrong +func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image string, cr splcommon.MetaObject) error { + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["splunk/image-tag"]; ok { + if annotations["splunk/image-tag"] == image { + return nil + } + } + + // create/update the checkUpdateImage annotation field + annotations["splunk/image-tag"] = image + + cr.SetAnnotations(annotations) + err := c.Update(ctx, cr) + return err +} diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index a15d5913e..64587db8a 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -3149,3 +3149,43 @@ func TestGetLicenseMasterURL(t *testing.T) { t.Errorf("Expected a valid return value") } } +func TestGetCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyClusterManager(ctx, client, ¤t) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + instanceType := SplunkClusterManager + + image, err := getCurrentImage(ctx, client, ¤t, instanceType) + + if err != nil { + t.Errorf("Unexpected getCurrentImage error %v", err) + } + if image != current.Spec.Image { + t.Errorf("getCurrentImage does not 
diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index a15d5913e..64587db8a 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -3149,3 +3149,43 @@ func TestGetLicenseMasterURL(t *testing.T) {
 		t.Errorf("Expected a valid return value")
 	}
 }
+func TestGetCurrentImage(t *testing.T) {
+
+	ctx := context.TODO()
+	current := enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: "test",
+		},
+		Spec: enterpriseApi.ClusterManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{
+					ImagePullPolicy: "Always",
+					Image:           "splunk/splunk:latest",
+				},
+				Volumes: []corev1.Volume{},
+			},
+		},
+	}
+	builder := fake.NewClientBuilder()
+	client := builder.Build()
+	utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
+
+	err := client.Create(ctx, &current)
+	_, err = ApplyClusterManager(ctx, client, &current)
+	if err != nil {
+		t.Errorf("applyClusterManager should not have returned error; err=%v", err)
+	}
+
+	instanceType := SplunkClusterManager
+
+	image, err := getCurrentImage(ctx, client, &current, instanceType)
+
+	if err != nil {
+		t.Errorf("Unexpected getCurrentImage error %v", err)
+	}
+	if image != current.Spec.Image {
+		t.Errorf("getCurrentImage does not return the current statefulset image")
+	}
+
+}
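Note: the new unit tests can be run in isolation with `go test ./pkg/splunk/enterprise/ -run 'TestIsClusterManagerReadyForUpgrade|TestChangeClusterManagerAnnotations|TestGetCurrentImage'`.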