From 13cceb0697db33a8477c0ec276e0c28e036629af Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Wed, 26 Mar 2025 16:40:32 -0700 Subject: [PATCH] pkg/operator/status: Drop kubelet skew guard, add RHEL guard The kubelet skew guards are from 1471d2c4e7 (Bug 1986453: Check for API server and node versions skew, 2021-07-27, #2658). But the Kube API server also landed similar guards in openshift/cluster-kube-apiserver-operator@9ce4f74775 (add KubeletVersionSkewController, 2021-08-26, openshift/cluster-kube-apiserver-operator#1199). openshift/enhancements@0ba744e750 (eus-upgrades-mvp: don't enforce skew check in MCO, 2021-04-29, openshift/enhancements#762) had shifted the proposal from MCO-guards to KAS-guards, so I'm not entirely clear on why the MCO guards landed at all. But it's convenient for me that they did, because while I'm dropping them here, I'm recycling the Node lister for a new check. 4.19 is dropping bare, package-managed RHEL support. I'd initially thought about looking for RHEL entries like: osImage: Red Hat Enterprise Linux 8.6 (Ootpa) while excluding RHCOS entries like: osImage: Red Hat Enterprise Linux CoreOS 419.96.202503032242-0 But instead of switching on osImage, I'm using the node.openshift.io/os_id label to find package-managed RHEL Nodes. The machine-config operator is setting up the label [1] based on the ID value in /etc/os-release. On RHCOS instances, the ID value is 'rhcos' [2]. On package-managed RHEL, it's 'rhel' [3,4]. 
[1]: https://github.com/openshift/machine-config-operator/blob/ddc18e84f4a0650e0e87aa0a4f90f9cf01b5259c/templates/worker/01-worker-kubelet/_base/units/kubelet.service.yaml#L19-L31 [2]: https://github.com/openshift/os/blob/41f6a028d37b750db0bf4257447d809bd9cbe4bf/manifest-ocp-rhel-9.6.yaml#L41 [3]: https://github.com/openshift/enhancements/blob/ea465e192bfb58ec8654f1c904a4af68777f68ec/enhancements/rhcos/split-rhcos-into-layers.md?plain=1#L416 [4]: https://github.com/openshift/machine-config-operator/blob/ddc18e84f4a0650e0e87aa0a4f90f9cf01b5259c/pkg/daemon/osrelease/osrelease.go#L69 --- pkg/operator/status.go | 181 +++++-------------- pkg/operator/status_test.go | 351 ------------------------------------ 2 files changed, 41 insertions(+), 491 deletions(-) diff --git a/pkg/operator/status.go b/pkg/operator/status.go index 5e4bd15292..fe75ac8564 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -6,7 +6,6 @@ import ( "fmt" "reflect" "sort" - "strconv" "strings" "time" @@ -254,13 +253,6 @@ func (optr *Operator) syncDegradedStatus(co *configv1.ClusterOperator, ierr sync cov1helpers.SetStatusCondition(&co.Status.Conditions, coDegradedCondition) } -const ( - skewUnchecked = "KubeletSkewUnchecked" - skewSupported = "KubeletSkewSupported" - skewUnsupported = "KubeletSkewUnsupported" - skewPresent = "KubeletSkewPresent" -) - // syncUpgradeableStatus applies the new condition to the mco's ClusterOperator object. 
func (optr *Operator) syncUpgradeableStatus(co *configv1.ClusterOperator) error { @@ -321,37 +313,20 @@ func (optr *Operator) syncUpgradeableStatus(co *configv1.ClusterOperator) error coStatusCondition.Message = "One or more machine config pools are updating, please see `oc get mcp` for further details" } - // don't overwrite status if updating or degraded - if !updating && !degraded && !interrupted { - skewStatus, status, err := optr.isKubeletSkewSupported(pools) + // don't overwrite status if already grumpy + if coStatusCondition.Status == configv1.ConditionTrue { + condition, err := optr.checkNodeUpgradeable() if err != nil { - klog.Errorf("Error checking version skew: %v, kubelet skew status: %v, status reason: %v, status message: %v", err, skewStatus, status.Reason, status.Message) - coStatusCondition.Reason = status.Reason - coStatusCondition.Message = status.Message - cov1helpers.SetStatusCondition(&co.Status.Conditions, coStatusCondition) - } - switch skewStatus { - case skewUnchecked: - coStatusCondition.Reason = status.Reason - coStatusCondition.Message = status.Message - cov1helpers.SetStatusCondition(&co.Status.Conditions, coStatusCondition) - case skewUnsupported: - coStatusCondition.Reason = status.Reason - coStatusCondition.Message = status.Message - mcoObjectRef := &corev1.ObjectReference{ - Kind: co.Kind, - Name: co.Name, - Namespace: co.Namespace, - UID: co.GetUID(), - } - klog.Infof("kubelet skew status: %v, status reason: %v", skewStatus, status.Reason) - optr.eventRecorder.Eventf(mcoObjectRef, corev1.EventTypeWarning, coStatusCondition.Reason, coStatusCondition.Message) - cov1helpers.SetStatusCondition(&co.Status.Conditions, coStatusCondition) - case skewPresent: - coStatusCondition.Reason = status.Reason - coStatusCondition.Message = status.Message - klog.Infof("kubelet skew status: %v, status reason: %v", skewStatus, status.Reason) + msg := fmt.Sprintf("Error checking Nodes for Upgradeable status: %v", err) + klog.Error(msg) + 
coStatusCondition.Status = configv1.ConditionUnknown + coStatusCondition.Reason = condition.Reason + coStatusCondition.Message = condition.Message cov1helpers.SetStatusCondition(&co.Status.Conditions, coStatusCondition) + } else if condition.Status != configv1.ConditionTrue { + coStatusCondition.Status = condition.Status + coStatusCondition.Reason = condition.Reason + coStatusCondition.Message = condition.Message } } cov1helpers.SetStatusCondition(&co.Status.Conditions, coStatusCondition) @@ -525,83 +500,43 @@ func (optr *Operator) cfeEvalCgroupsV1() (bool, error) { return nodeClusterConfig.Spec.CgroupMode == configv1.CgroupModeV1, nil } -// isKubeletSkewSupported checks the version skew of kube-apiserver and node kubelet version. -// Returns the skew status. version skew > 2 is not supported. -func (optr *Operator) isKubeletSkewSupported(pools []*mcfgv1.MachineConfigPool) (skewStatus string, coStatus configv1.ClusterOperatorStatusCondition, err error) { - coStatus = configv1.ClusterOperatorStatusCondition{} - kubeAPIServerStatus, err := optr.clusterOperatorLister.Get("kube-apiserver") - if err != nil { - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err) - return skewUnchecked, coStatus, err - } - // looks like - // - name: kube-apiserver - // version: 1.21.0-rc.0 - kubeAPIServerVersion := "" - for _, version := range kubeAPIServerStatus.Status.Versions { - if version.Name != "kube-apiserver" { - continue - } - kubeAPIServerVersion = version.Version - break - } - if kubeAPIServerVersion == "" { - err = fmt.Errorf("kube-apiserver does not yet have a version") - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err.Error()) - return skewUnchecked, coStatus, err +// checkNodeUpgradeable checks current Node status to look for anything incompatible with the next 4.(y+1) OpenShift release. 
+func (optr *Operator) checkNodeUpgradeable() (coStatus configv1.ClusterOperatorStatusCondition, err error) { + coStatus = configv1.ClusterOperatorStatusCondition{ + Status: configv1.ConditionTrue, } - kubeAPIServerMinorVersion, err := getMinorKubeletVersion(kubeAPIServerVersion) + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "node.openshift.io/os_id": "rhel", + }, + }) if err != nil { - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err) - return skewUnchecked, coStatus, err + err = fmt.Errorf("label selector for node.openshift.io/os_id=rhel failed: %w", err) + coStatus.Status = configv1.ConditionUnknown + coStatus.Reason = "FailedToGetSelector" + coStatus.Message = err.Error() + return coStatus, err } - var ( - lastError error - kubeletVersion string - ) - nodes, err := optr.GetAllManagedNodes(pools) + nodes, err := optr.nodeLister.List(selector) if err != nil { - err = fmt.Errorf("getting all managed nodes failed: %w", err) - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when getting all the managed nodes: %v", err.Error()) + err = fmt.Errorf("could not list nodes for %s: %w", selector, err) + coStatus.Status = configv1.ConditionUnknown + coStatus.Reason = "FailedToGetNodes" + coStatus.Message = err.Error() + return coStatus, err } + nonCoreOSNodes := make([]string, 0, len(nodes)) for _, node := range nodes { - // looks like kubeletVersion: v1.21.0-rc.0+6143dea - kubeletVersion = node.Status.NodeInfo.KubeletVersion - if kubeletVersion == "" { - continue - } - nodeMinorVersion, err := getMinorKubeletVersion(kubeletVersion) - if err != nil { - lastError = err - continue - } - if nodeMinorVersion+2 < kubeAPIServerMinorVersion { - coStatus.Reason = skewUnsupported - coStatus.Message = fmt.Sprintf("One or more nodes have an unsupported kubelet version skew. 
Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least %v.", getMinimalSkewSupportNodeVersion(kubeAPIServerVersion)) - return skewUnsupported, coStatus, nil - } - if nodeMinorVersion+2 == kubeAPIServerMinorVersion { - coStatus.Reason = skewPresent - coStatus.Message = fmt.Sprintf("Current kubelet version %v will not be supported by newer kube-apiserver. Please upgrade the kubelet first if plan to upgrade the kube-apiserver", kubeletVersion) - return skewPresent, coStatus, nil - } + osImage := node.Status.NodeInfo.OSImage + nonCoreOSNodes = append(nonCoreOSNodes, fmt.Sprintf("%s (%s)", node.Name, osImage)) } - if kubeletVersion == "" { - err = fmt.Errorf("kubelet does not yet have a version") - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err.Error()) - return skewUnchecked, coStatus, err + sort.Strings(nonCoreOSNodes) + if len(nonCoreOSNodes) > 0 { + coStatus.Status = configv1.ConditionFalse + coStatus.Reason = "RHELNodes" + coStatus.Message = fmt.Sprintf("%d RHEL nodes, including %s, but OpenShift 4.19 requires RHCOS https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/updating_clusters/preparing-to-update-a-cluster#updating-cluster-prepare-past-4-18", len(nonCoreOSNodes), nonCoreOSNodes[0]) } - if lastError != nil { - coStatus.Reason = skewUnchecked - coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err) - return skewUnchecked, coStatus, lastError - } - return skewSupported, coStatus, nil + return coStatus, nil } // GetAllManagedNodes returns the nodes managed by MCO @@ -621,40 +556,6 @@ func (optr *Operator) GetAllManagedNodes(pools []*mcfgv1.MachineConfigPool) ([]* return nodes, nil } -// getMinorKubeletVersion parses the minor version number of kubelet -func getMinorKubeletVersion(version string) (int, error) { - tokens := strings.Split(version, ".") - if 
len(tokens) < 2 { - return 0, fmt.Errorf("incorrect version syntax: %q", version) - } - minorVersion, err := strconv.ParseInt(tokens[1], 10, 32) - if err != nil { - return 0, err - } - return int(minorVersion), nil -} - -// getMinimalSkewSupportNodeVersion returns the minimal supported node kubelet version. -func getMinimalSkewSupportNodeVersion(version string) string { - // drop the pre-release and commit hash - idx := strings.Index(version, "-") - if idx >= 0 { - version = version[:idx] - } - - idx = strings.Index(version, "+") - if idx >= 0 { - version = version[:idx] - } - - tokens := strings.Split(version, ".") - if minorVersion, err := strconv.ParseInt(tokens[1], 10, 32); err == nil { - tokens[1] = strconv.Itoa(int(minorVersion) - 2) - return strings.Join(tokens, ".") - } - return version -} - func (optr *Operator) fetchClusterOperator() (*configv1.ClusterOperator, error) { co, err := optr.clusterOperatorLister.Get(optr.name) diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go index 3217ad59e3..d3d3219260 100644 --- a/pkg/operator/status_test.go +++ b/pkg/operator/status_test.go @@ -8,7 +8,6 @@ import ( "testing" corelisterv1 "k8s.io/client-go/listers/core/v1" - clientgotesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" corev1 "k8s.io/api/core/v1" @@ -801,353 +800,3 @@ func TestInClusterBringUpStayOnErr(t *testing.T) { assert.False(t, optr.inClusterBringup) } - -func TestKubeletSkewUnSupported(t *testing.T) { - kasOperator := &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"}, - Status: configv1.ClusterOperatorStatus{ - Versions: []configv1.OperandVersion{ - {Name: "kube-apiserver", Version: "1.21"}, - }, - }, - } - optr := &Operator{ - eventRecorder: &record.FakeRecorder{}, - fgAccessor: featuregates.NewHardcodedFeatureGateAccess( - []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, - ), - } - optr.vStore = newVersionStore() - 
optr.vStore.Set("operator", "test-version") - optr.mcpLister = &mockMCPLister{ - pools: []*mcfgv1.MachineConfigPool{ - helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"), - helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"), - }, - } - nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer) - nodeIndexer.Add(&corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}}, - Status: corev1.NodeStatus{ - NodeInfo: corev1.NodeSystemInfo{ - KubeletVersion: "v1.18", - }, - }, - }) - - co := &configv1.ClusterOperator{} - configNode := &configv1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: ctrlcommon.ClusterNodeInstanceName}, - Spec: configv1.NodeSpec{}, - } - configNodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeClusterLister = configlistersv1.NewNodeLister(configNodeIndexer) - configNodeIndexer.Add(configNode) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse}) - fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator) - optr.configClient = fakeClient - optr.inClusterBringup = true - - operatorIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.clusterOperatorLister = configlistersv1.NewClusterOperatorLister(operatorIndexer) - 
operatorIndexer.Add(co) - operatorIndexer.Add(kasOperator) - - configMapIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.mcoCmLister = corelisterv1.NewConfigMapLister(configMapIndexer) - - fn1 := func(config *renderConfig, co *configv1.ClusterOperator) error { - return errors.New("mocked fn1") - } - err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.NotNil(t, err, "expected syncAll to fail") - - assert.True(t, optr.inClusterBringup) - - fn1 = func(config *renderConfig, co *configv1.ClusterOperator) error { - return nil - } - err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.Nil(t, err, "expected syncAll to pass") - - assert.False(t, optr.inClusterBringup) - - var lastUpdate clientgotesting.UpdateAction - for _, action := range fakeClient.Actions() { - if action.GetVerb() == "update" { - lastUpdate = action.(clientgotesting.UpdateAction) - } - } - if lastUpdate == nil { - t.Fatal("missing update") - } - operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator) - var upgradeable *configv1.ClusterOperatorStatusCondition - for _, condition := range operatorStatus.Status.Conditions { - if condition.Type == configv1.OperatorUpgradeable { - upgradeable = &condition - break - } - } - if upgradeable == nil { - t.Fatal("missing condition") - } - if upgradeable.Status != configv1.ConditionTrue { - t.Fatal(upgradeable) - } - if upgradeable.Message != "One or more nodes have an unsupported kubelet version skew. Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least 1.19." 
{ - t.Fatal(upgradeable) - } - if upgradeable.Reason != "KubeletSkewUnsupported" { - t.Fatal(upgradeable) - } -} - -func TestCustomPoolKubeletSkewUnSupported(t *testing.T) { - customSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, "node-role/custom", "") - kasOperator := &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"}, - Status: configv1.ClusterOperatorStatus{ - Versions: []configv1.OperandVersion{ - {Name: "kube-apiserver", Version: "1.21"}, - }, - }, - } - optr := &Operator{ - eventRecorder: &record.FakeRecorder{}, - fgAccessor: featuregates.NewHardcodedFeatureGateAccess( - []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, - ), - } - optr.vStore = newVersionStore() - optr.vStore.Set("operator", "test-version") - optr.mcpLister = &mockMCPLister{ - pools: []*mcfgv1.MachineConfigPool{ - helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"), - helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"), - helpers.NewMachineConfigPool("custom", nil, customSelector, "v0"), - }, - } - nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer) - nodeIndexer.Add(&corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "custom", Labels: map[string]string{"node-role/custom": ""}}, - Status: corev1.NodeStatus{ - NodeInfo: corev1.NodeSystemInfo{ - KubeletVersion: "v1.18", - }, - }, - }) - - co := &configv1.ClusterOperator{} - configNode := &configv1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: ctrlcommon.ClusterNodeInstanceName}, - Spec: configv1.NodeSpec{}, - } - configNodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeClusterLister = configlistersv1.NewNodeLister(configNodeIndexer) - configNodeIndexer.Add(configNode) - 
cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse}) - fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator) - optr.configClient = fakeClient - optr.inClusterBringup = true - - operatorIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.clusterOperatorLister = configlistersv1.NewClusterOperatorLister(operatorIndexer) - operatorIndexer.Add(co) - operatorIndexer.Add(kasOperator) - - configMapIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.mcoCmLister = corelisterv1.NewConfigMapLister(configMapIndexer) - - fn1 := func(config *renderConfig, co *configv1.ClusterOperator) error { - return errors.New("mocked fn1") - } - err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.NotNil(t, err, "expected syncAll to fail") - - assert.True(t, optr.inClusterBringup) - - fn1 = func(config *renderConfig, co *configv1.ClusterOperator) error { - return nil - } - err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.Nil(t, err, "expected syncAll to pass") - - assert.False(t, optr.inClusterBringup) - - var lastUpdate clientgotesting.UpdateAction - for _, action := range fakeClient.Actions() { - if action.GetVerb() == "update" { - lastUpdate = action.(clientgotesting.UpdateAction) - } - } - if lastUpdate == nil { - t.Fatal("missing update") - } - operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator) - var upgradeable 
*configv1.ClusterOperatorStatusCondition - for _, condition := range operatorStatus.Status.Conditions { - if condition.Type == configv1.OperatorUpgradeable { - upgradeable = &condition - break - } - } - if upgradeable == nil { - t.Fatal("missing condition") - } - if upgradeable.Status != configv1.ConditionTrue { - t.Fatal(upgradeable) - } - if upgradeable.Message != "One or more nodes have an unsupported kubelet version skew. Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least 1.19." { - t.Fatal(upgradeable) - } - if upgradeable.Reason != "KubeletSkewUnsupported" { - t.Fatal(upgradeable) - } -} - -func TestKubeletSkewSupported(t *testing.T) { - kasOperator := &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"}, - Status: configv1.ClusterOperatorStatus{ - Versions: []configv1.OperandVersion{ - {Name: "kube-apiserver", Version: "1.21"}, - }, - }, - } - optr := &Operator{ - eventRecorder: &record.FakeRecorder{}, - fgAccessor: featuregates.NewHardcodedFeatureGateAccess( - []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, - ), - } - optr.vStore = newVersionStore() - optr.vStore.Set("operator", "test-version") - optr.mcpLister = &mockMCPLister{ - pools: []*mcfgv1.MachineConfigPool{ - helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"), - helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"), - }, - } - nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer) - nodeIndexer.Add(&corev1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}}, - Status: corev1.NodeStatus{ - NodeInfo: corev1.NodeSystemInfo{ - KubeletVersion: "v1.20", - }, - }, - }) - - co := &configv1.ClusterOperator{} - configNode := &configv1.Node{ - 
ObjectMeta: metav1.ObjectMeta{Name: ctrlcommon.ClusterNodeInstanceName}, - Spec: configv1.NodeSpec{}, - } - configNodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.nodeClusterLister = configlistersv1.NewNodeLister(configNodeIndexer) - configNodeIndexer.Add(configNode) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse}) - cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse}) - fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator) - optr.configClient = fakeClient - optr.inClusterBringup = true - - operatorIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.clusterOperatorLister = configlistersv1.NewClusterOperatorLister(operatorIndexer) - operatorIndexer.Add(co) - operatorIndexer.Add(kasOperator) - - configMapIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - optr.mcoCmLister = corelisterv1.NewConfigMapLister(configMapIndexer) - - fn1 := func(config *renderConfig, co *configv1.ClusterOperator) error { - return errors.New("mocked fn1") - } - err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.NotNil(t, err, "expected syncAll to fail") - - assert.True(t, optr.inClusterBringup) - - fn1 = func(config *renderConfig, co *configv1.ClusterOperator) error { - return nil - } - err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}}) - assert.Nil(t, err, "expected syncAll to pass") - - assert.False(t, optr.inClusterBringup) - - var 
lastUpdate clientgotesting.UpdateAction - for _, action := range fakeClient.Actions() { - if action.GetVerb() == "update" { - lastUpdate = action.(clientgotesting.UpdateAction) - } - } - if lastUpdate == nil { - t.Fatal("missing update") - } - operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator) - var upgradeable *configv1.ClusterOperatorStatusCondition - for _, condition := range operatorStatus.Status.Conditions { - if condition.Type == configv1.OperatorUpgradeable { - upgradeable = &condition - break - } - } - if upgradeable == nil { - t.Fatal("missing condition") - } - if upgradeable.Status != configv1.ConditionTrue { - t.Fatal(upgradeable) - } - if upgradeable.Message != "" { - t.Fatal(upgradeable) - } - if upgradeable.Reason != "AsExpected" { - t.Fatal(upgradeable) - } -} - -func TestGetMinorKubeletVersion(t *testing.T) { - tcs := []struct { - version string - minor int - expectNilErr bool - }{ - {"v1.20.1", 20, true}, - {"v1.20.1+abc0", 20, true}, - {"v1.20.1+0123", 20, true}, - {"v1.20.1-rc", 20, true}, - {"v1.20.1-rc.1", 20, true}, - {"v1.20.1-rc+abc123", 20, true}, - {"v1.20.1-rc.0+abc123", 20, true}, - {"v1.20.1", 20, true}, - {"1.20.1", 20, true}, - {"1.20", 20, true}, - {"12", 0, false}, - {".xy", 0, false}, - {"1.xy.1", 0, false}, - } - for _, tc := range tcs { - minorV, err := getMinorKubeletVersion(tc.version) - if tc.expectNilErr && err != nil { - t.Errorf("test %q failed: unexpected error %v", tc.version, err) - continue - } - if !tc.expectNilErr && err == nil { - t.Errorf("test %q failed: expected error, got nil ", tc.version) - continue - } - if tc.expectNilErr { - assert.Equal(t, tc.minor, minorV, fmt.Sprintf("failed test %q", tc.version)) - } - } -}