Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 4 additions & 4 deletions pkg/controller/clusterconfig/clusterconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,28 +91,28 @@ func (r *ReconcileClusterConfig) Reconcile(request reconcile.Request) (reconcile
// Validate the cluster config
if err := network.ValidateClusterConfig(clusterConfig.Spec); err != nil {
log.Printf("Failed to validate Network.Spec: %v", err)
r.status.SetFailing(statusmanager.ClusterConfig, "InvalidClusterConfig",
r.status.SetDegraded(statusmanager.ClusterConfig, "InvalidClusterConfig",
fmt.Sprintf("The cluster configuration is invalid (%v). Use 'oc edit network.config.openshift.io cluster' to fix.", err))
return reconcile.Result{}, err
}

operatorConfig, err := r.UpdateOperatorConfig(context.TODO(), *clusterConfig)
if err != nil {
log.Printf("Failed to generate NetworkConfig CRD: %v", err)
r.status.SetFailing(statusmanager.ClusterConfig, "UpdateOperatorConfig",
r.status.SetDegraded(statusmanager.ClusterConfig, "UpdateOperatorConfig",
fmt.Sprintf("Internal error while converting cluster configuration: %v", err))
return reconcile.Result{}, err
}

if operatorConfig != nil {
if err := apply.ApplyObject(context.TODO(), r.client, operatorConfig); err != nil {
log.Printf("Could not apply operator config: %v", err)
r.status.SetFailing(statusmanager.ClusterConfig, "ApplyOperatorConfig",
r.status.SetDegraded(statusmanager.ClusterConfig, "ApplyOperatorConfig",
fmt.Sprintf("Error while trying to update operator configuration: %v", err))
return reconcile.Result{}, err
}
}

r.status.SetNotFailing(statusmanager.ClusterConfig)
r.status.SetNotDegraded(statusmanager.ClusterConfig)
return reconcile.Result{}, nil
}
22 changes: 11 additions & 11 deletions pkg/controller/operconfig/operconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
err := r.client.Get(context.TODO(), request.NamespacedName, operConfig)
if err != nil {
if apierrors.IsNotFound(err) {
r.status.SetFailing(statusmanager.OperatorConfig, "NoOperatorConfig",
r.status.SetDegraded(statusmanager.OperatorConfig, "NoOperatorConfig",
fmt.Sprintf("Operator configuration %s was deleted", request.NamespacedName.String()))
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected, since we set
Expand All @@ -136,7 +136,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
// This will also commit the change back to the apiserver.
if err := r.MergeClusterConfig(context.TODO(), operConfig); err != nil {
log.Printf("Failed to merge the cluster configuration: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "MergeClusterConfig",
r.status.SetDegraded(statusmanager.OperatorConfig, "MergeClusterConfig",
fmt.Sprintf("Internal error while merging cluster configuration and operator configuration: %v", err))
return reconcile.Result{}, err
}
Expand All @@ -147,7 +147,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
// Validate the configuration
if err := network.Validate(&operConfig.Spec); err != nil {
log.Printf("Failed to validate Network.operator.openshift.io.Spec: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "InvalidOperatorConfig",
r.status.SetDegraded(statusmanager.OperatorConfig, "InvalidOperatorConfig",
fmt.Sprintf("The operator configuration is invalid (%v). Use 'oc edit network.operator.openshift.io cluster' to fix.", err))
return reconcile.Result{}, err
}
Expand All @@ -171,7 +171,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
err = network.IsChangeSafe(prev, &operConfig.Spec)
if err != nil {
log.Printf("Not applying unsafe change: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "InvalidOperatorConfig",
r.status.SetDegraded(statusmanager.OperatorConfig, "InvalidOperatorConfig",
fmt.Sprintf("Not applying unsafe configuration change: %v. Use 'oc edit network.operator.openshift.io cluster' to undo the change.", err))
return reconcile.Result{}, err
}
Expand All @@ -181,7 +181,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
objs, err := network.Render(&operConfig.Spec, ManifestPath)
if err != nil {
log.Printf("Failed to render: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "RenderError",
r.status.SetDegraded(statusmanager.OperatorConfig, "RenderError",
fmt.Sprintf("Internal error while rendering operator configuration: %v", err))
return reconcile.Result{}, err
}
Expand All @@ -190,7 +190,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
app, err := AppliedConfiguration(operConfig)
if err != nil {
log.Printf("Failed to render applied: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "RenderError",
r.status.SetDegraded(statusmanager.OperatorConfig, "RenderError",
fmt.Sprintf("Internal error while recording new operator configuration: %v", err))
return reconcile.Result{}, err
}
Expand Down Expand Up @@ -220,7 +220,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
if err := controllerutil.SetControllerReference(operConfig, obj, r.scheme); err != nil {
err = errors.Wrapf(err, "could not set reference for (%s) %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
log.Println(err)
r.status.SetFailing(statusmanager.OperatorConfig, "InternalError",
r.status.SetDegraded(statusmanager.OperatorConfig, "InternalError",
fmt.Sprintf("Internal error while updating operator configuration: %v", err))
return reconcile.Result{}, err
}
Expand All @@ -238,7 +238,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
continue
}
}
r.status.SetFailing(statusmanager.OperatorConfig, "ApplyOperatorConfig",
r.status.SetDegraded(statusmanager.OperatorConfig, "ApplyOperatorConfig",
fmt.Sprintf("Error while updating operator configuration: %v", err))
return reconcile.Result{}, err
}
Expand All @@ -248,7 +248,7 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
status, err := r.ClusterNetworkStatus(context.TODO(), operConfig)
if err != nil {
log.Printf("Could not generate network status: %v", err)
r.status.SetFailing(statusmanager.OperatorConfig, "StatusError",
r.status.SetDegraded(statusmanager.OperatorConfig, "StatusError",
fmt.Sprintf("Could not update cluster configuration status: %v", err))
return reconcile.Result{}, err
}
Expand All @@ -258,13 +258,13 @@ func (r *ReconcileOperConfig) Reconcile(request reconcile.Request) (reconcile.Re
if err := apply.ApplyObject(context.TODO(), r.client, status); err != nil {
err = errors.Wrapf(err, "could not apply (%s) %s/%s", status.GroupVersionKind(), status.GetNamespace(), status.GetName())
log.Println(err)
r.status.SetFailing(statusmanager.OperatorConfig, "StatusError",
r.status.SetDegraded(statusmanager.OperatorConfig, "StatusError",
fmt.Sprintf("Could not update cluster configuration status: %v", err))
return reconcile.Result{}, err
}
}

r.status.SetNotFailing(statusmanager.OperatorConfig)
r.status.SetNotDegraded(statusmanager.OperatorConfig)

// All was successful. Request that this be re-triggered after ResyncPeriod,
// so we can reconcile state again.
Expand Down
43 changes: 21 additions & 22 deletions pkg/controller/statusmanager/status_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,8 +109,8 @@ func (status *StatusManager) Set(reachedAvailableLevel bool, conditions ...confi
}
}

// syncFailing syncs the current Failing status
func (status *StatusManager) syncFailing() {
// syncDegraded syncs the current Degraded status
func (status *StatusManager) syncDegraded() {
for _, c := range status.failing {
if c != nil {
status.Set(false, *c)
Expand All @@ -120,32 +120,32 @@ func (status *StatusManager) syncFailing() {
status.Set(
false,
configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorFailing,
Type: configv1.OperatorDegraded,
Status: configv1.ConditionFalse,
},
)
}

// SetFailing marks the operator as Failing with the given reason and message. If it
// SetDegraded marks the operator as Degraded with the given reason and message. If it
// is not already degraded for a lower-level reason, the operator's status will be updated.
func (status *StatusManager) SetFailing(level StatusLevel, reason, message string) {
func (status *StatusManager) SetDegraded(level StatusLevel, reason, message string) {
status.failing[level] = &configv1.ClusterOperatorStatusCondition{
Type: configv1.OperatorFailing,
Type: configv1.OperatorDegraded,
Status: configv1.ConditionTrue,
Reason: reason,
Message: message,
}
status.syncFailing()
status.syncDegraded()
}

// SetNotFailing marks the operator as not Failing at the given level. If the operator
// SetNotDegraded marks the operator as not Degraded at the given level. If the operator
// status previously indicated degradation at this level, it will be updated to show the next
// higher-level degraded condition, or else to show that the operator is no longer degraded.
func (status *StatusManager) SetNotFailing(level StatusLevel) {
func (status *StatusManager) SetNotDegraded(level StatusLevel) {
if status.failing[level] != nil {
status.failing[level] = nil
}
status.syncFailing()
status.syncDegraded()
}

func (status *StatusManager) SetDaemonSets(daemonSets []types.NamespacedName) {
Expand All @@ -156,9 +156,8 @@ func (status *StatusManager) SetDeployments(deployments []types.NamespacedName)
status.deployments = deployments
}

// SetFromPods sets the operator status to Failing, Progressing, or Available, based on
// the current status of the manager's DaemonSets and Deployments. However, this is a
// no-op if the StatusManager is currently marked as failing due to a configuration error.
// SetFromPods sets the operator Degraded/Progressing/Available status, based on
// the current status of the manager's DaemonSets and Deployments.
func (status *StatusManager) SetFromPods() {

targetLevel := os.Getenv("RELEASE_VERSION")
Expand All @@ -170,10 +169,10 @@ func (status *StatusManager) SetFromPods() {
ns := &corev1.Namespace{}
if err := status.client.Get(context.TODO(), types.NamespacedName{Name: dsName.Namespace}, ns); err != nil {
if errors.IsNotFound(err) {
status.SetFailing(PodDeployment, "NoNamespace",
status.SetDegraded(PodDeployment, "NoNamespace",
fmt.Sprintf("Namespace %q does not exist", dsName.Namespace))
} else {
status.SetFailing(PodDeployment, "InternalError",
status.SetDegraded(PodDeployment, "InternalError",
fmt.Sprintf("Internal error deploying pods: %v", err))
}
return
Expand All @@ -182,10 +181,10 @@ func (status *StatusManager) SetFromPods() {
ds := &appsv1.DaemonSet{}
if err := status.client.Get(context.TODO(), dsName, ds); err != nil {
if errors.IsNotFound(err) {
status.SetFailing(PodDeployment, "NoDaemonSet",
status.SetDegraded(PodDeployment, "NoDaemonSet",
fmt.Sprintf("Expected DaemonSet %q does not exist", dsName.String()))
} else {
status.SetFailing(PodDeployment, "InternalError",
status.SetDegraded(PodDeployment, "InternalError",
fmt.Sprintf("Internal error deploying pods: %v", err))
}
return
Expand All @@ -210,10 +209,10 @@ func (status *StatusManager) SetFromPods() {
ns := &corev1.Namespace{}
if err := status.client.Get(context.TODO(), types.NamespacedName{Name: depName.Namespace}, ns); err != nil {
if errors.IsNotFound(err) {
status.SetFailing(PodDeployment, "NoNamespace",
status.SetDegraded(PodDeployment, "NoNamespace",
fmt.Sprintf("Namespace %q does not exist", depName.Namespace))
} else {
status.SetFailing(PodDeployment, "InternalError",
status.SetDegraded(PodDeployment, "InternalError",
fmt.Sprintf("Internal error deploying pods: %v", err))
}
return
Expand All @@ -222,10 +221,10 @@ func (status *StatusManager) SetFromPods() {
dep := &appsv1.Deployment{}
if err := status.client.Get(context.TODO(), depName, dep); err != nil {
if errors.IsNotFound(err) {
status.SetFailing(PodDeployment, "NoDeployment",
status.SetDegraded(PodDeployment, "NoDeployment",
fmt.Sprintf("Expected Deployment %q does not exist", depName.String()))
} else {
status.SetFailing(PodDeployment, "InternalError",
status.SetDegraded(PodDeployment, "InternalError",
fmt.Sprintf("Internal error deploying pods: %v", err))
}
return
Expand All @@ -250,7 +249,7 @@ func (status *StatusManager) SetFromPods() {
}
}

status.SetNotFailing(PodDeployment)
status.SetNotDegraded(PodDeployment)

if len(progressing) > 0 {
status.Set(
Expand Down
Loading