diff --git a/pkg/authtoken/providers/azure/azure_msi.go b/pkg/authtoken/providers/azure/azure_msi.go
index 13f3085e9..fd8909135 100644
--- a/pkg/authtoken/providers/azure/azure_msi.go
+++ b/pkg/authtoken/providers/azure/azure_msi.go
@@ -36,7 +36,7 @@ func (a *azureAuthTokenProvider) FetchToken(ctx context.Context) (interfaces.Aut
 	token := interfaces.AuthToken{}
 	opts := &azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID(a.clientID)}
-	klog.V(5).InfoS("FetchToken", "client ID", a.clientID)
+	klog.V(2).InfoS("FetchToken", "client ID", a.clientID)
 	credential, err := azidentity.NewManagedIdentityCredential(opts)
 	if err != nil {
 		return token, errors.Wrap(err, "failed to create managed identity cred")
@@ -46,7 +46,7 @@ func (a *azureAuthTokenProvider) FetchToken(ctx context.Context) (interfaces.Aut
 		func(err error) bool {
 			return ctx.Err() == nil
 		},
 		func() error {
-			klog.V(5).InfoS("GetToken start", "credential", credential)
+			klog.V(2).InfoS("GetToken start", "credential", credential)
 			azToken, err = credential.GetToken(ctx, policy.TokenRequestOptions{
 				Scopes: []string{aksScope},
 			})
diff --git a/pkg/authtoken/providers/secret/k8s_secret.go b/pkg/authtoken/providers/secret/k8s_secret.go
index 0e9265f99..8a3afb8cf 100644
--- a/pkg/authtoken/providers/secret/k8s_secret.go
+++ b/pkg/authtoken/providers/secret/k8s_secret.go
@@ -43,7 +43,7 @@ func New(secretName, namespace string) (interfaces.AuthTokenProvider, error) {
 }
 
 func (s *secretAuthTokenProvider) FetchToken(ctx context.Context) (interfaces.AuthToken, error) {
-	klog.V(3).InfoS("fetching token from secret", "secret", klog.KRef(s.secretName, s.secretNamespace))
+	klog.V(2).InfoS("fetching token from secret", "secret", klog.KRef(s.secretNamespace, s.secretName))
 	token := interfaces.AuthToken{}
 	secret, err := s.fetchSecret(ctx)
 	if err != nil {
diff --git a/pkg/authtoken/token_refresher.go b/pkg/authtoken/token_refresher.go
index da3aa6fbb..7f4f71a4a 100644
--- a/pkg/authtoken/token_refresher.go
+++ b/pkg/authtoken/token_refresher.go
@@ -45,7 +45,7 @@ var (
 )
 
 func (at *Refresher) callFetchToken(ctx context.Context) (interfaces.AuthToken, error) {
-	klog.V(5).InfoS("FetchToken start")
+	klog.V(2).InfoS("FetchToken start")
 	deadline := time.Now().Add(DefaultRefreshDuration)
 	fetchTokenContext, cancel := context.WithDeadline(ctx, deadline)
 	defer cancel()
@@ -67,7 +67,7 @@ func (at *Refresher) RefreshToken(ctx context.Context) error {
 			continue
 		}
 
-		klog.V(5).InfoS("WriteToken start")
+		klog.V(2).InfoS("WriteToken start")
 		err = at.writer.WriteToken(token)
 		if err != nil {
 			klog.ErrorS(err, "Failed to WriteToken")
diff --git a/pkg/authtoken/token_writer.go b/pkg/authtoken/token_writer.go
index 6a4d13d4e..7a03fee38 100644
--- a/pkg/authtoken/token_writer.go
+++ b/pkg/authtoken/token_writer.go
@@ -55,6 +55,6 @@ func (w *Writer) WriteToken(token interfaces.AuthToken) error {
 	if err != nil {
 		return errors.Wrap(err, "cannot write the refresh token")
 	}
-	klog.V(3).InfoS("token has been saved to the file successfully")
+	klog.V(2).InfoS("token has been saved to the file successfully")
 	return nil
 }
diff --git a/pkg/controllers/clusterresourceplacement/cluster_selector.go b/pkg/controllers/clusterresourceplacement/cluster_selector.go
index 53ad51ccd..d26ece432 100644
--- a/pkg/controllers/clusterresourceplacement/cluster_selector.go
+++ b/pkg/controllers/clusterresourceplacement/cluster_selector.go
@@ -35,13 +35,13 @@ func (r *Reconciler) selectClusters(placement *fleetv1alpha1.ClusterResourcePlac
 		if err != nil {
 			return nil, err
 		}
-		klog.V(4).InfoS("we select all the available clusters in the fleet without a policy",
+		klog.V(2).InfoS("we select all the available clusters in the fleet without a policy",
 			"placement", placement.Name, "clusters", clusterNames)
 		return
 	}
 	// a fix list of clusters set
 	if len(placement.Spec.Policy.ClusterNames) != 0 {
-		klog.V(4).InfoS("use the cluster names provided as the list of cluster we select",
+		klog.V(2).InfoS("use the cluster names provided as the list of cluster we select",
 			"placement", placement.Name, "clusters", placement.Spec.Policy.ClusterNames)
 		clusterNames, err = r.getClusters(placement.Spec.Policy.ClusterNames)
 		if err != nil {
@@ -56,7 +56,7 @@ func (r *Reconciler) selectClusters(placement *fleetv1alpha1.ClusterResourcePlac
 		if err != nil {
 			return nil, err
 		}
-		klog.V(4).InfoS("we select all the available clusters in the fleet without a cluster affinity",
+		klog.V(2).InfoS("we select all the available clusters in the fleet without a cluster affinity",
 			"placement", placement.Name, "clusters", clusterNames)
 		return
 	}
@@ -71,13 +71,13 @@ func (r *Reconciler) selectClusters(placement *fleetv1alpha1.ClusterResourcePlac
 		if err != nil {
 			return nil, errors.Wrap(err, fmt.Sprintf("selector = %v", clusterSelector.LabelSelector))
 		}
-		klog.V(4).InfoS("selector matches some cluster", "clusterNum", len(matchClusters), "placement", placement.Name, "selector", clusterSelector.LabelSelector)
+		klog.V(2).InfoS("selector matches some cluster", "clusterNum", len(matchClusters), "placement", placement.Name, "selector", clusterSelector.LabelSelector)
 		for _, clusterName := range matchClusters {
 			selectedClusters[clusterName] = true
 		}
 	}
 	for cluster := range selectedClusters {
-		klog.V(4).InfoS("matched a cluster", "cluster", cluster, "placement", placement.Name)
+		klog.V(2).InfoS("matched a cluster", "cluster", cluster, "placement", placement.Name)
 		clusterNames = append(clusterNames, cluster)
 	}
 	return clusterNames, nil
diff --git a/pkg/controllers/clusterresourceplacement/placement_controller.go b/pkg/controllers/clusterresourceplacement/placement_controller.go
index fc3c8c19f..1242f04fa 100644
--- a/pkg/controllers/clusterresourceplacement/placement_controller.go
+++ b/pkg/controllers/clusterresourceplacement/placement_controller.go
@@ -92,7 +92,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		return r.removeAllWorks(ctx, placementOld)
 	}
 
-	klog.V(3).InfoS("Successfully selected clusters", "placement", placementOld.Name, "number of clusters", len(selectedClusters))
+	klog.V(2).InfoS("Successfully selected clusters", "placement", placementOld.Name, "number of clusters", len(selectedClusters))
 	// select the new resources and record the result in the placementNew status
 	manifests, scheduleErr := r.selectResources(ctx, placementNew)
@@ -107,7 +107,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		klog.V(2).InfoS("No resources match the placement", "placement", placeRef)
 		return r.removeAllWorks(ctx, placementOld)
 	}
-	klog.V(3).InfoS("Successfully selected resources", "placement", placementOld.Name, "number of resources", len(manifests))
+	klog.V(2).InfoS("Successfully selected resources", "placement", placementOld.Name, "number of resources", len(manifests))
 
 	// persist union of the all the selected resources and clusters between placementNew and placementOld so that we won't
 	// get orphaned resource/cluster if the reconcile loops stops between work creation and the placement status persisted
@@ -118,7 +118,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		_ = r.Client.Status().Update(ctx, placementOld, client.FieldOwner(utils.PlacementFieldManagerName))
 		return ctrl.Result{}, scheduleErr
 	}
-	klog.V(3).InfoS("Successfully persisted the intermediate scheduling result", "placement", placementOld.Name,
+	klog.V(2).InfoS("Successfully persisted the intermediate scheduling result", "placement", placementOld.Name,
 		"totalClusters", totalCluster, "totalResources", totalResources)
 	// pick up the new version so placementNew can continue to update
 	placementNew.SetResourceVersion(placementOld.GetResourceVersion())
@@ -131,7 +131,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		_ = r.Client.Status().Update(ctx, placementOld, client.FieldOwner(utils.PlacementFieldManagerName))
 		return ctrl.Result{}, scheduleErr
 	}
-	klog.V(3).InfoS("Successfully scheduled work resources", "placement", placementOld.Name, "number of clusters", len(selectedClusters))
+	klog.V(2).InfoS("Successfully scheduled work resources", "placement", placementOld.Name, "number of clusters", len(selectedClusters))
 
 	// go through the existing cluster list and remove work from no longer scheduled clusters.
 	removed, scheduleErr := r.removeStaleWorks(ctx, placementNew.GetName(), placementOld.Status.TargetClusters, placementNew.Status.TargetClusters)
@@ -144,7 +144,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		_ = r.Client.Status().Update(ctx, placementOld, client.FieldOwner(utils.PlacementFieldManagerName))
 		return ctrl.Result{}, scheduleErr
 	}
-	klog.V(3).InfoS("Successfully removed work resources from previously selected clusters", "placement", placementOld.Name, "removed clusters", removed)
+	klog.V(2).InfoS("Successfully removed work resources from previously selected clusters", "placement", placementOld.Name, "removed clusters", removed)
 
 	// the schedule has succeeded, so we now can use the placementNew status that contains all the newly selected cluster and resources
 	r.updatePlacementScheduledCondition(placementNew, nil)
@@ -157,7 +157,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		_ = r.Client.Status().Update(ctx, placementNew, client.FieldOwner(utils.PlacementFieldManagerName))
 		return ctrl.Result{}, applyErr
 	}
-	klog.V(3).InfoS("Successfully collected work resources status from all selected clusters",
+	klog.V(2).InfoS("Successfully collected work resources status from all selected clusters",
 		"placement", placementOld.Name, "number of clusters", len(selectedClusters), "hasPending", hasPending,
 		"numberFailedPlacement", len(placementNew.Status.FailedResourcePlacements))
@@ -182,7 +182,7 @@ func (r *Reconciler) removeAllWorks(ctx context.Context, placement *fleetv1alpha
 		klog.ErrorS(removeErr, "failed to remove all the work resources from previously selected clusters", "placement", placeRef)
 		return ctrl.Result{}, removeErr
 	}
-	klog.V(3).InfoS("Successfully removed work resources from previously selected clusters",
+	klog.V(2).InfoS("Successfully removed work resources from previously selected clusters",
 		"placement", placeRef, "number of removed clusters", removed)
 	placement.Status.TargetClusters = nil
 	placement.Status.SelectedResources = nil
@@ -262,7 +262,7 @@ func (r *Reconciler) updatePlacementScheduledCondition(placement *fleetv1alpha1.
 			ObservedGeneration: placement.Generation,
 		})
 		if schedCond == nil || schedCond.Status != metav1.ConditionTrue {
-			klog.V(3).InfoS("successfully scheduled all selected resources to their clusters", "placement", placementRef)
+			klog.V(2).InfoS("successfully scheduled all selected resources to their clusters", "placement", placementRef)
 			r.Recorder.Event(placement, corev1.EventTypeNormal, "ResourceScheduled", "successfully scheduled all selected resources to their clusters")
 		}
 	} else {
@@ -293,7 +293,7 @@ func (r *Reconciler) updatePlacementAppliedCondition(placement *fleetv1alpha1.Cl
 			Message:            "Successfully applied resources to member clusters",
 			ObservedGeneration: placement.Generation,
 		})
-		klog.V(3).InfoS("successfully applied all selected resources", "placement", placementRef)
+		klog.V(2).InfoS("successfully applied all selected resources", "placement", placementRef)
 		if preAppliedCond == nil || preAppliedCond.Status != metav1.ConditionTrue {
 			r.Recorder.Event(placement, corev1.EventTypeNormal, "ResourceApplied", "successfully applied all selected resources")
 		}
@@ -305,7 +305,7 @@ func (r *Reconciler) updatePlacementAppliedCondition(placement *fleetv1alpha1.Cl
 			Message:            applyErr.Error(),
 			ObservedGeneration: placement.Generation,
 		})
-		klog.V(3).InfoS("Some selected resources are still waiting to be applied", "placement", placementRef)
+		klog.V(2).InfoS("Some selected resources are still waiting to be applied", "placement", placementRef)
 		if preAppliedCond == nil || preAppliedCond.Status == metav1.ConditionTrue {
 			r.Recorder.Event(placement, corev1.EventTypeWarning, "ResourceApplyPending", "Some applied resources are now waiting to be applied to the member cluster")
 		}
@@ -318,7 +318,7 @@ func (r *Reconciler) updatePlacementAppliedCondition(placement *fleetv1alpha1.Cl
 			Message:            applyErr.Error(),
 			ObservedGeneration: placement.Generation,
 		})
-		klog.V(3).InfoS("failed to apply some selected resources", "placement", placementRef)
+		klog.V(2).InfoS("failed to apply some selected resources", "placement", placementRef)
 		if preAppliedCond == nil || preAppliedCond.Status != metav1.ConditionFalse {
 			r.Recorder.Event(placement, corev1.EventTypeWarning, "ResourceApplyFailed", "failed to apply some selected resources")
 		}
diff --git a/pkg/controllers/clusterresourceplacement/resource_selector.go b/pkg/controllers/clusterresourceplacement/resource_selector.go
index c6fed883e..4cc580a13 100644
--- a/pkg/controllers/clusterresourceplacement/resource_selector.go
+++ b/pkg/controllers/clusterresourceplacement/resource_selector.go
@@ -47,7 +47,7 @@ func (r *Reconciler) selectResources(ctx context.Context, placement *fleetv1alph
 			Namespace: unstructuredObj.GetNamespace(),
 		}
 		placement.Status.SelectedResources = append(placement.Status.SelectedResources, res)
-		klog.V(4).InfoS("selected one resource ", "placement", placement.Name, "resource", res)
+		klog.V(2).InfoS("selected one resource ", "placement", placement.Name, "resource", res)
 		manifest, err := generateManifest(unstructuredObj)
 		if err != nil {
 			return nil, err
@@ -68,7 +68,7 @@ func (r *Reconciler) gatherSelectedResource(ctx context.Context, placement *flee
 		}
 
 		if r.DisabledResourceConfig.IsResourceDisabled(gvk) {
-			klog.V(4).InfoS("Skip select resource", "group version kind", gvk.String())
+			klog.V(2).InfoS("Skip select resource", "group version kind", gvk.String())
 			continue
 		}
 		var objs []runtime.Object
@@ -105,7 +105,7 @@ func (r *Reconciler) gatherSelectedResource(ctx context.Context, placement *flee
 
 // fetchClusterScopedResources retrieve the objects based on the selector.
 func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector fleetv1alpha1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) {
-	klog.V(4).InfoS("start to fetch the cluster scoped resources by the selector", "selector", selector)
+	klog.V(2).InfoS("start to fetch the cluster scoped resources by the selector", "selector", selector)
 	gk := schema.GroupKind{
 		Group: selector.Group,
 		Kind:  selector.Kind,
@@ -138,7 +138,7 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 		uObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 		if uObj.GetDeletionTimestamp() != nil {
 			// skip a to be deleted namespace
-			klog.V(4).InfoS("skip the deleting cluster scoped resources by the selector",
+			klog.V(2).InfoS("skip the deleting cluster scoped resources by the selector",
 				"selector", selector, "placeName", placeName, "resource name", uObj.GetName())
 			return []runtime.Object{}, nil
 		}
@@ -165,7 +165,7 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 		uObj := objects[i].DeepCopyObject().(*unstructured.Unstructured)
 		if uObj.GetDeletionTimestamp() != nil {
 			// skip a to be deleted namespace
-			klog.V(4).InfoS("skip the deleting cluster scoped resources by the selector",
+			klog.V(2).InfoS("skip the deleting cluster scoped resources by the selector",
 				"selector", selector, "placeName", placeName, "resource name", uObj.GetName())
 			continue
 		}
@@ -177,7 +177,7 @@ func (r *Reconciler) fetchClusterScopedResources(ctx context.Context, selector f
 
 // fetchNamespaceResources retrieve all the objects for a ClusterResourceSelector that is for namespace.
 func (r *Reconciler) fetchNamespaceResources(ctx context.Context, selector fleetv1alpha1.ClusterResourceSelector, placeName string) ([]runtime.Object, error) {
-	klog.V(4).InfoS("start to fetch the namespace resources by the selector", "selector", selector)
+	klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector)
 	var resources []runtime.Object
 
 	if len(selector.Name) != 0 {
@@ -229,7 +229,7 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(ctx context.Context, namesp
 		return nil, errors.New(fmt.Sprintf("namespace %s is not allowed to propagate", namespaceName))
 	}
 
-	klog.V(4).InfoS("start to fetch all the resources inside a namespace", "namespace", namespaceName)
+	klog.V(2).InfoS("start to fetch all the resources inside a namespace", "namespace", namespaceName)
 	// select the namespace object itself
 	obj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(namespaceName)
 	if err != nil {
@@ -239,7 +239,7 @@ func (r *Reconciler) fetchAllResourcesInOneNamespace(ctx context.Context, namesp
 	nameSpaceObj := obj.DeepCopyObject().(*unstructured.Unstructured)
 	if nameSpaceObj.GetDeletionTimestamp() != nil {
 		// skip a to be deleted namespace
-		klog.V(4).InfoS("skip the deleting namespace resources by the selector",
+		klog.V(2).InfoS("skip the deleting namespace resources by the selector",
 			"placeName", placeName, "namespace", namespaceName)
 		return resources, nil
 	}
@@ -285,7 +285,7 @@ func (r *Reconciler) shouldSelectResource(gvr schema.GroupVersionResource) bool
 	}
 	for _, gvk := range gvks {
 		if r.DisabledResourceConfig.IsResourceDisabled(gvk) {
-			klog.V(4).InfoS("Skip watch resource", "group version kind", gvk.String())
+			klog.V(2).InfoS("Skip watch resource", "group version kind", gvk.String())
 			return false
 		}
 	}
diff --git a/pkg/controllers/clusterresourceplacement/work_propagation.go b/pkg/controllers/clusterresourceplacement/work_propagation.go
index ad4fc4a09..fffc43a75 100644
--- a/pkg/controllers/clusterresourceplacement/work_propagation.go
+++ b/pkg/controllers/clusterresourceplacement/work_propagation.go
@@ -100,7 +100,7 @@ func (r *Reconciler) scheduleWork(ctx context.Context, placement *fleetv1alpha1.
 		}
 		existingHash := curWork.GetAnnotations()[SpecHashAnnotationKey]
 		if existingHash == specHash || reflect.DeepEqual(curWork.Spec.Workload.Manifests, workerSpec.Workload.Manifests) {
-			klog.V(4).InfoS("skip updating work spec as its identical",
+			klog.V(2).InfoS("skip updating work spec as it's identical",
 				"member cluster namespace", memberClusterNsName, "work name", workName, "number of manifests", len(manifests))
 			continue
 		}
@@ -113,13 +113,13 @@ func (r *Reconciler) scheduleWork(ctx context.Context, placement *fleetv1alpha1.
 			allErr = append(allErr, errors.Wrap(updateErr, fmt.Sprintf("failed to update the work obj %s in namespace %s", workName, memberClusterNsName)))
 			continue
 		}
-		klog.V(3).InfoS("updated work spec with manifests",
+		klog.V(2).InfoS("updated work spec with manifests",
 			"member cluster namespace", memberClusterNsName, "work name", workName, "number of manifests", len(manifests))
 	}
 	if changed {
 		klog.V(2).InfoS("Applied all work to the selected cluster namespaces", "placement", klog.KObj(placement), "number of clusters", len(memberClusterNames))
 	} else {
-		klog.V(3).InfoS("Nothing new to apply for the cluster resource placement", "placement", klog.KObj(placement), "number of clusters", len(memberClusterNames))
+		klog.V(2).InfoS("Nothing new to apply for the cluster resource placement", "placement", klog.KObj(placement), "number of clusters", len(memberClusterNames))
 	}
 
 	return apiErrors.NewAggregate(allErr)
@@ -166,7 +166,7 @@ func (r *Reconciler) collectAllManifestsStatus(placement *fleetv1alpha1.ClusterR
 		work, err := r.getResourceBinding(memberClusterNsName, workName)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
-				klog.V(3).InfoS("the work change has not shown up in the cache yet",
+				klog.V(2).InfoS("the work change has not shown up in the cache yet",
 					"work", klog.KRef(memberClusterNsName, workName), "cluster", cluster)
 				hasPending = true
 				continue
@@ -177,19 +177,19 @@ func (r *Reconciler) collectAllManifestsStatus(placement *fleetv1alpha1.ClusterR
 		appliedCond := meta.FindStatusCondition(work.Status.Conditions, workapi.ConditionTypeApplied)
 		if appliedCond == nil {
 			hasPending = true
-			klog.V(4).InfoS("the work is never picked up by the member cluster",
+			klog.V(2).InfoS("the work is never picked up by the member cluster",
 				"work", klog.KObj(work), "cluster", cluster)
 			continue
 		}
 		if appliedCond.ObservedGeneration < work.GetGeneration() {
 			hasPending = true
-			klog.V(4).InfoS("the update of the work is not picked up by the member cluster yet",
+			klog.V(2).InfoS("the update of the work is not picked up by the member cluster yet",
 				"work", klog.KObj(work), "cluster", cluster, "work generation", work.GetGeneration(), "applied generation", appliedCond.ObservedGeneration)
 			continue
 		}
 		if appliedCond.Status == metav1.ConditionTrue {
-			klog.V(4).InfoS("the work is applied successfully by the member cluster",
+			klog.V(2).InfoS("the work is applied successfully by the member cluster",
 				"work", klog.KObj(work), "cluster", cluster)
 			continue
 		}
@@ -204,7 +204,7 @@ func (r *Reconciler) collectAllManifestsStatus(placement *fleetv1alpha1.ClusterR
 			appliedCond = meta.FindStatusCondition(manifestCondition.Conditions, workapi.ConditionTypeApplied)
 			// collect if there is an explicit fail
 			if appliedCond != nil && appliedCond.Status != metav1.ConditionTrue {
-				klog.V(3).InfoS("find a failed to apply manifest", "member cluster namespace", memberClusterNsName,
+				klog.V(2).InfoS("find a failed to apply manifest", "member cluster namespace", memberClusterNsName,
 					"manifest name", manifestCondition.Identifier.Name, "group", manifestCondition.Identifier.Group,
 					"version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind)
 				placement.Status.FailedResourcePlacements = append(placement.Status.FailedResourcePlacements, fleetv1alpha1.FailedResourcePlacement{
diff --git a/pkg/controllers/internalmembercluster/member_controller.go b/pkg/controllers/internalmembercluster/member_controller.go
index 4c8f8db19..a9eb2fd4f 100644
--- a/pkg/controllers/internalmembercluster/member_controller.go
+++ b/pkg/controllers/internalmembercluster/member_controller.go
@@ -150,7 +150,7 @@ func (r *Reconciler) updateHealth(ctx context.Context, imc *fleetv1alpha1.Intern
 
 // updateResourceStats collects and updates resource usage stats of the member cluster.
 func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
-	klog.V(4).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("updateResourceStats", "InternalMemberCluster", klog.KObj(imc))
 	var nodes corev1.NodeList
 	if err := r.memberClient.List(ctx, &nodes); err != nil {
 		return errors.Wrapf(err, "failed to list nodes for member cluster %s", klog.KObj(imc))
@@ -180,7 +180,7 @@ func (r *Reconciler) updateResourceStats(ctx context.Context, imc *fleetv1alpha1
 
 // updateInternalMemberClusterWithRetry updates InternalMemberCluster status.
 func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, imc *fleetv1alpha1.InternalMemberCluster) error {
-	klog.V(4).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("updateInternalMemberClusterWithRetry", "InternalMemberCluster", klog.KObj(imc))
 	backOffPeriod := retry.DefaultBackoff
 	backOffPeriod.Cap = time.Second * time.Duration(imc.Spec.HeartbeatPeriodSeconds)
 
@@ -195,7 +195,7 @@ func (r *Reconciler) updateInternalMemberClusterWithRetry(ctx context.Context, i
 
 // updateMemberAgentHeartBeat is used to update member agent heart beat for Internal member cluster.
 func updateMemberAgentHeartBeat(imc *fleetv1alpha1.InternalMemberCluster) {
-	klog.V(4).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("update Internal member cluster heartbeat", "InternalMemberCluster", klog.KObj(imc))
 	desiredAgentStatus := imc.GetAgentStatus(fleetv1alpha1.MemberAgent)
 	if desiredAgentStatus != nil {
 		desiredAgentStatus.LastReceivedHeartbeat = metav1.Now()
@@ -203,7 +203,7 @@ func updateMemberAgentHeartBeat(imc *fleetv1alpha1.InternalMemberCluster) {
 }
 
 func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentObj) {
-	klog.V(4).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberClusterHealthy", "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentHealthy),
 		Status: metav1.ConditionTrue,
@@ -222,7 +222,7 @@ func (r *Reconciler) markInternalMemberClusterHealthy(imc apis.ConditionedAgentO
 }
 
 func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgentObj, err error) {
-	klog.V(4).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberClusterUnhealthy", "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentHealthy),
 		Status: metav1.ConditionFalse,
@@ -242,7 +242,7 @@ func (r *Reconciler) markInternalMemberClusterUnhealthy(imc apis.ConditionedAgen
 }
 
 func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentObj) {
-	klog.V(4).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberClusterJoined", "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentJoined),
 		Status: metav1.ConditionTrue,
@@ -262,7 +262,7 @@ func (r *Reconciler) markInternalMemberClusterJoined(imc apis.ConditionedAgentOb
 }
 
 func (r *Reconciler) markInternalMemberClusterJoinFailed(imc apis.ConditionedAgentObj, err error) {
-	klog.V(4).InfoS("markInternalMemberCluster join failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberCluster join failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentJoined),
 		Status: metav1.ConditionUnknown,
@@ -282,7 +282,7 @@ func (r *Reconciler) markInternalMemberClusterJoinFailed(imc apis.ConditionedAge
 }
 
 func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj) {
-	klog.V(4).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberClusterLeft", "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentJoined),
 		Status: metav1.ConditionFalse,
@@ -302,7 +302,7 @@ func (r *Reconciler) markInternalMemberClusterLeft(imc apis.ConditionedAgentObj)
 }
 
 func (r *Reconciler) markInternalMemberClusterLeaveFailed(imc apis.ConditionedAgentObj, err error) {
-	klog.V(4).InfoS("markInternalMemberCluster leave failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
+	klog.V(2).InfoS("markInternalMemberCluster leave failed", "error", err, "InternalMemberCluster", klog.KObj(imc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.AgentJoined),
 		Status: metav1.ConditionUnknown,
diff --git a/pkg/controllers/membercluster/membercluster_controller.go b/pkg/controllers/membercluster/membercluster_controller.go
index 4704e4602..4e812cc2a 100644
--- a/pkg/controllers/membercluster/membercluster_controller.go
+++ b/pkg/controllers/membercluster/membercluster_controller.go
@@ -61,7 +61,7 @@ type Reconciler struct {
 }
 
 func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	klog.V(3).InfoS("Reconcile", "memberCluster", req.NamespacedName)
+	klog.V(2).InfoS("Reconcile", "memberCluster", req.NamespacedName)
 	var oldMC fleetv1alpha1.MemberCluster
 	if err := r.Client.Get(ctx, req.NamespacedName, &oldMC); err != nil {
 		klog.ErrorS(err, "failed to get member cluster", "memberCluster", req.Name)
@@ -181,7 +181,7 @@ func (r *Reconciler) ensureFinalizer(ctx context.Context, mc *fleetv1alpha1.Memb
 // Condition ReadyToJoin == true means all the above actions have been done successfully at least once.
 // It will never turn false after true.
 func (r *Reconciler) join(ctx context.Context, mc *fleetv1alpha1.MemberCluster, imc *fleetv1alpha1.InternalMemberCluster) error {
-	klog.V(3).InfoS("join", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("join", "memberCluster", klog.KObj(mc))
 
 	namespaceName, err := r.syncNamespace(ctx, mc)
 	if err != nil {
@@ -210,7 +210,7 @@ func (r *Reconciler) join(ctx context.Context, mc *fleetv1alpha1.MemberCluster,
 //
 // Note that leave doesn't delete any of the resources created by join(). Instead, deleting MemberCluster will delete them.
 func (r *Reconciler) leave(ctx context.Context, mc *fleetv1alpha1.MemberCluster, imc *fleetv1alpha1.InternalMemberCluster) error {
-	klog.V(3).InfoS("leave", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("leave", "memberCluster", klog.KObj(mc))
 	// Never joined successfully before.
 	if imc == nil {
 		return nil
@@ -227,7 +227,7 @@ func (r *Reconciler) leave(ctx context.Context, mc *fleetv1alpha1.MemberCluster,
 
 // syncNamespace creates or updates the namespace for member cluster.
 func (r *Reconciler) syncNamespace(ctx context.Context, mc *fleetv1alpha1.MemberCluster) (string, error) {
-	klog.V(5).InfoS("syncNamespace", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("syncNamespace", "memberCluster", klog.KObj(mc))
 	namespaceName := fmt.Sprintf(utils.NamespaceNameFormat, mc.Name)
 	expectedNS := corev1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{
@@ -242,7 +242,7 @@ func (r *Reconciler) syncNamespace(ctx context.Context, mc *fleetv1alpha1.Member
 		if !apierrors.IsNotFound(err) {
 			return "", errors.Wrapf(err, "failed to get namespace %s", namespaceName)
 		}
-		klog.V(4).InfoS("creating namespace", "memberCluster", klog.KObj(mc), "namespace", namespaceName)
+		klog.V(2).InfoS("creating namespace", "memberCluster", klog.KObj(mc), "namespace", namespaceName)
 		// Make sure the entire namespace is removed if the member cluster is deleted.
 		if err = r.Client.Create(ctx, &expectedNS, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 			return "", errors.Wrapf(err, "failed to create namespace %s", namespaceName)
 		}
@@ -259,7 +259,7 @@ func (r *Reconciler) syncNamespace(ctx context.Context, mc *fleetv1alpha1.Member
 
 // syncRole creates or updates the role for member cluster to access its namespace in hub cluster.
 func (r *Reconciler) syncRole(ctx context.Context, mc *fleetv1alpha1.MemberCluster, namespaceName string) (string, error) {
-	klog.V(5).InfoS("syncRole", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("syncRole", "memberCluster", klog.KObj(mc))
 	// Role name is created using member cluster name.
 	roleName := fmt.Sprintf(utils.RoleNameFormat, mc.Name)
 	expectedRole := rbacv1.Role{
@@ -277,7 +277,7 @@ func (r *Reconciler) syncRole(ctx context.Context, mc *fleetv1alpha1.MemberClust
 		if !apierrors.IsNotFound(err) {
 			return "", errors.Wrapf(err, "failed to get role %s", roleName)
 		}
-		klog.V(4).InfoS("creating role", "memberCluster", klog.KObj(mc), "role", roleName)
+		klog.V(2).InfoS("creating role", "memberCluster", klog.KObj(mc), "role", roleName)
 		if err = r.Client.Create(ctx, &expectedRole, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 			return "", errors.Wrapf(err, "failed to create role %s with rules %+v", roleName, expectedRole.Rules)
 		}
@@ -291,7 +291,7 @@ func (r *Reconciler) syncRole(ctx context.Context, mc *fleetv1alpha1.MemberClust
 		return roleName, nil
 	}
 	currentRole.Rules = expectedRole.Rules
-	klog.V(4).InfoS("updating role", "memberCluster", klog.KObj(mc), "role", roleName)
+	klog.V(2).InfoS("updating role", "memberCluster", klog.KObj(mc), "role", roleName)
 	if err := r.Client.Update(ctx, &currentRole, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 		return "", errors.Wrapf(err, "failed to update role %s with rules %+v", roleName, currentRole.Rules)
 	}
@@ -302,7 +302,7 @@ func (r *Reconciler) syncRole(ctx context.Context, mc *fleetv1alpha1.MemberClust
 
 // syncRoleBinding creates or updates the role binding for member cluster to access its namespace in hub cluster.
 func (r *Reconciler) syncRoleBinding(ctx context.Context, mc *fleetv1alpha1.MemberCluster, namespaceName string, roleName string) error {
-	klog.V(5).InfoS("syncRoleBinding", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("syncRoleBinding", "memberCluster", klog.KObj(mc))
 	// Role binding name is created using member cluster name
 	roleBindingName := fmt.Sprintf(utils.RoleBindingNameFormat, mc.Name)
 	expectedRoleBinding := rbacv1.RoleBinding{
@@ -325,7 +325,7 @@ func (r *Reconciler) syncRoleBinding(ctx context.Context, mc *fleetv1alpha1.Memb
 		if !apierrors.IsNotFound(err) {
 			return errors.Wrapf(err, "failed to get role binding %s", roleBindingName)
 		}
-		klog.V(4).InfoS("creating role binding", "memberCluster", klog.KObj(mc), "roleBinding", roleBindingName)
+		klog.V(2).InfoS("creating role binding", "memberCluster", klog.KObj(mc), "roleBinding", roleBindingName)
 		if err = r.Client.Create(ctx, &expectedRoleBinding, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 			return errors.Wrapf(err, "failed to create role binding %s", roleBindingName)
 		}
@@ -340,7 +340,7 @@ func (r *Reconciler) syncRoleBinding(ctx context.Context, mc *fleetv1alpha1.Memb
 	}
 	currentRoleBinding.Subjects = expectedRoleBinding.Subjects
 	currentRoleBinding.RoleRef = expectedRoleBinding.RoleRef
-	klog.V(4).InfoS("updating role binding", "memberCluster", klog.KObj(mc), "roleBinding", roleBindingName)
+	klog.V(2).InfoS("updating role binding", "memberCluster", klog.KObj(mc), "roleBinding", roleBindingName)
 	if err := r.Client.Update(ctx, &expectedRoleBinding, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 		return errors.Wrapf(err, "failed to update role binding %s", roleBindingName)
 	}
@@ -352,7 +352,7 @@ func (r *Reconciler) syncRoleBinding(ctx context.Context, mc *fleetv1alpha1.Memb
 
 // syncInternalMemberCluster is used to sync spec from MemberCluster to InternalMemberCluster.
 func (r *Reconciler) syncInternalMemberCluster(ctx context.Context, mc *fleetv1alpha1.MemberCluster, namespaceName string, currentImc *fleetv1alpha1.InternalMemberCluster) (*fleetv1alpha1.InternalMemberCluster, error) {
-	klog.V(5).InfoS("syncInternalMemberCluster", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("syncInternalMemberCluster", "memberCluster", klog.KObj(mc))
 	expectedImc := fleetv1alpha1.InternalMemberCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: mc.Name,
@@ -367,7 +367,7 @@ func (r *Reconciler) syncInternalMemberCluster(ctx context.Context, mc *fleetv1a
 
 	// Creates internal member cluster if not found.
 	if currentImc == nil {
-		klog.V(4).InfoS("creating internal member cluster", "InternalMemberCluster", klog.KObj(&expectedImc), "spec", expectedImc.Spec)
+		klog.V(2).InfoS("creating internal member cluster", "InternalMemberCluster", klog.KObj(&expectedImc), "spec", expectedImc.Spec)
 		if err := r.Client.Create(ctx, &expectedImc, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 			return nil, errors.Wrapf(err, "failed to create internal member cluster %s with spec %+v", klog.KObj(&expectedImc), expectedImc.Spec)
 		}
@@ -381,7 +381,7 @@ func (r *Reconciler) syncInternalMemberCluster(ctx context.Context, mc *fleetv1a
 		return currentImc, nil
 	}
 	currentImc.Spec = expectedImc.Spec
-	klog.V(4).InfoS("updating internal member cluster", "InternalMemberCluster", klog.KObj(currentImc), "spec", currentImc.Spec)
+	klog.V(2).InfoS("updating internal member cluster", "InternalMemberCluster", klog.KObj(currentImc), "spec", currentImc.Spec)
 	if err := r.Client.Update(ctx, currentImc, client.FieldOwner(utils.MCControllerFieldManagerName)); err != nil {
 		return nil, errors.Wrapf(err, "failed to update internal member cluster %s with spec %+v", klog.KObj(currentImc), currentImc.Spec)
 	}
@@ -397,7 +397,7 @@ func toOwnerReference(memberCluster *fleetv1alpha1.MemberCluster) *metav1.OwnerR
 
 // syncInternalMemberClusterStatus is used to sync status from InternalMemberCluster to MemberCluster & aggregate join conditions from all agents.
 func (r *Reconciler) syncInternalMemberClusterStatus(imc *fleetv1alpha1.InternalMemberCluster, mc *fleetv1alpha1.MemberCluster) {
-	klog.V(5).InfoS("syncInternalMemberClusterStatus", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("syncInternalMemberClusterStatus", "memberCluster", klog.KObj(mc))
 	if imc == nil {
 		return
 	}
@@ -412,7 +412,7 @@ func (r *Reconciler) syncInternalMemberClusterStatus(imc *fleetv1alpha1.Internal
 
 // updateMemberClusterStatus is used to update member cluster status.
 func (r *Reconciler) updateMemberClusterStatus(ctx context.Context, mc *fleetv1alpha1.MemberCluster) error {
-	klog.V(5).InfoS("updateMemberClusterStatus", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("updateMemberClusterStatus", "memberCluster", klog.KObj(mc))
 	backOffPeriod := retry.DefaultRetry
 	backOffPeriod.Cap = time.Second * time.Duration(mc.Spec.HeartbeatPeriodSeconds/2)
 
@@ -427,7 +427,7 @@ func (r *Reconciler) updateMemberClusterStatus(ctx context.Context, mc *fleetv1a
 
 // aggregateJoinedCondition is used to calculate and mark the joined or left status for member cluster based on join conditions from all agents.
 func (r *Reconciler) aggregateJoinedCondition(mc *fleetv1alpha1.MemberCluster) {
-	klog.V(5).InfoS("syncJoinedCondition", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("aggregateJoinedCondition", "memberCluster", klog.KObj(mc))
 	if len(mc.Status.AgentStatus) < len(r.agents) {
 		markMemberClusterUnknown(r.recorder, mc)
 		return
@@ -467,7 +467,7 @@ func (r *Reconciler) aggregateJoinedCondition(mc *fleetv1alpha1.MemberCluster) {
 
 // markMemberClusterReadyToJoin is used to update the ReadyToJoin condition as true of member cluster.
 func markMemberClusterReadyToJoin(recorder record.EventRecorder, mc apis.ConditionedObj) {
-	klog.V(5).InfoS("markMemberClusterReadyToJoin", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("markMemberClusterReadyToJoin", "memberCluster", klog.KObj(mc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.ConditionTypeMemberClusterReadyToJoin),
 		Status: metav1.ConditionTrue,
@@ -487,7 +487,7 @@ func markMemberClusterReadyToJoin(recorder record.EventRecorder, mc apis.Conditi
 
 // markMemberClusterJoined is used to the update the status of the member cluster to have the joined condition.
 func markMemberClusterJoined(recorder record.EventRecorder, mc apis.ConditionedObj) {
-	klog.V(5).InfoS("markMemberClusterJoined", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("markMemberClusterJoined", "memberCluster", klog.KObj(mc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.ConditionTypeMemberClusterJoined),
 		Status: metav1.ConditionTrue,
@@ -508,7 +508,7 @@ func markMemberClusterJoined(recorder record.EventRecorder, mc apis.ConditionedO
 
 // markMemberClusterLeft is used to update the status of the member cluster to have the left condition and mark member cluster as not ready to join.
 func markMemberClusterLeft(recorder record.EventRecorder, mc apis.ConditionedObj) {
-	klog.V(5).InfoS("markMemberClusterLeft", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("markMemberClusterLeft", "memberCluster", klog.KObj(mc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.ConditionTypeMemberClusterJoined),
 		Status: metav1.ConditionFalse,
@@ -535,7 +535,7 @@ func markMemberClusterLeft(recorder record.EventRecorder, mc apis.ConditionedObj
 
 // markMemberClusterUnknown is used to update the status of the member cluster to have the left condition.
 func markMemberClusterUnknown(recorder record.EventRecorder, mc apis.ConditionedObj) {
-	klog.V(5).InfoS("markMemberClusterUnknown", "memberCluster", klog.KObj(mc))
+	klog.V(2).InfoS("markMemberClusterUnknown", "memberCluster", klog.KObj(mc))
 	newCondition := metav1.Condition{
 		Type:   string(fleetv1alpha1.ConditionTypeMemberClusterJoined),
 		Status: metav1.ConditionUnknown,
diff --git a/pkg/controllers/memberclusterplacement/membercluster_controller.go b/pkg/controllers/memberclusterplacement/membercluster_controller.go
index d32d1c23e..4ce81cf4d 100644
--- a/pkg/controllers/memberclusterplacement/membercluster_controller.go
+++ b/pkg/controllers/memberclusterplacement/membercluster_controller.go
@@ -65,10 +65,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		}
 		if mObj == nil {
 			// This is a corner case that the member cluster is deleted before we handle its status change. We can't use match since we don't have its label.
-			klog.V(3).InfoS("enqueue a placement to reconcile for a deleted member cluster", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
+			klog.V(2).InfoS("enqueue a placement to reconcile for a deleted member cluster", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
 			r.PlacementController.Enqueue(crpList[i])
 		} else if matchPlacement(&placement, mObj.(*unstructured.Unstructured).DeepCopy()) {
-			klog.V(3).InfoS("enqueue a placement to reconcile", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
+			klog.V(2).InfoS("enqueue a placement to reconcile", "memberCluster", memberClusterName, "placement", klog.KObj(&placement))
 			r.PlacementController.Enqueue(crpList[i])
 		}
 	}
@@ -87,7 +87,7 @@ func matchPlacement(placement *fleetv1alpha1.ClusterResourcePlacement, memberClu
 	}
 	// no policy set
 	if placement.Spec.Policy == nil {
-		klog.V(4).InfoS("find a matching placement with no policy",
+		klog.V(2).InfoS("find a matching placement with no policy",
 			"memberCluster", memberCluster.GetName(), "placement", placementObj)
 		return true
 	}
@@ -96,7 +96,7 @@ func matchPlacement(placement *fleetv1alpha1.ClusterResourcePlacement, memberClu
 	if len(placement.Spec.Policy.ClusterNames) != 0 {
 		for _, clusterName := range placement.Spec.Policy.ClusterNames {
 			if clusterName == memberCluster.GetName() {
-				klog.V(4).InfoS("find a matching placement with a list of cluster names",
+				klog.V(2).InfoS("find a matching placement with a list of cluster names",
 					"memberCluster", memberCluster.GetName(), "placement", placementObj)
 				return true
 			}
@@ -106,7 +106,7 @@ func matchPlacement(placement *fleetv1alpha1.ClusterResourcePlacement, memberClu
 	// no cluster affinity set
 	if placement.Spec.Policy.Affinity == nil || placement.Spec.Policy.Affinity.ClusterAffinity == nil ||
 		len(placement.Spec.Policy.Affinity.ClusterAffinity.ClusterSelectorTerms) == 0 {
-		klog.V(4).InfoS("find a matching placement with no cluster affinity",
+		klog.V(2).InfoS("find a matching placement with no cluster affinity",
 			"memberCluster", memberCluster.GetName(), "placement", placementObj)
 		return true
 	}
@@ -119,7 +119,7 @@ func matchPlacement(placement *fleetv1alpha1.ClusterResourcePlacement, memberClu
 			continue
 		}
 		if s.Matches(labels.Set(memberCluster.GetLabels())) {
-			klog.V(4).InfoS("find a matching placement with label selector",
+			klog.V(2).InfoS("find a matching placement with label selector",
 				"memberCluster", memberCluster.GetName(), "placement", placementObj, "selector", clusterSelector.LabelSelector)
 			return true
 		}
diff --git a/pkg/controllers/resourcechange/resourcechange_controller.go b/pkg/controllers/resourcechange/resourcechange_controller.go
index 18c13d18e..c787038d5 100644
--- a/pkg/controllers/resourcechange/resourcechange_controller.go
+++ b/pkg/controllers/resourcechange/resourcechange_controller.go
@@ -56,7 +56,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		klog.ErrorS(err, "we have encountered a fatal error that can't be retried")
 		return ctrl.Result{}, err
 	}
-	klog.V(3).InfoS("Reconciling object", "obj", clusterWideKey)
+	klog.V(2).InfoS("Reconciling object", "obj", clusterWideKey)
 
 	// the clusterObj is set to be the object that the placement direct selects,
 	// in the case of a deleted namespace scoped object, the clusterObj is set to be its parent namespace object.
@@ -79,7 +79,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 			klog.ErrorS(err, "Failed to find the namespace the resource belongs to", "obj", clusterWideKey)
 			return ctrl.Result{}, client.IgnoreNotFound(err)
 		}
-		klog.V(4).InfoS("Find placement that select the namespace that contains a namespace scoped object", "obj", clusterWideKey)
+		klog.V(2).InfoS("Find placement that select the namespace that contains a namespace scoped object", "obj", clusterWideKey)
 	}
 	matchedCrps, err := r.findAffectedPlacements(clusterObj.DeepCopyObject().(*unstructured.Unstructured))
 	if err != nil {
@@ -87,12 +87,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ct
 		return ctrl.Result{}, err
 	}
 	if len(matchedCrps) == 0 {
-		klog.V(4).InfoS("change in object does not affect any placement", "obj", clusterWideKey)
+		klog.V(2).InfoS("change in object does not affect any placement", "obj", clusterWideKey)
 		return ctrl.Result{}, nil
 	}
 	// enqueue each CRP object into the CRP controller queue to get reconciled
 	for crp := range matchedCrps {
-		klog.V(3).InfoS("Change in object triggered placement reconcile", "obj", clusterWideKey, "crp", crp)
+		klog.V(2).InfoS("Change in object triggered placement reconcile", "obj", clusterWideKey, "crp", crp)
 		r.PlacementController.Enqueue(crp)
 	}
 
@@ -123,11 +123,11 @@ func (r *Reconciler) findPlacementsSelectedDeletedRes(res keys.ClusterWideKey, c
 		}
 	}
 	if len(matchedCrps) == 0 {
-		klog.V(4).InfoS("change in deleted object does not affect any placement", "obj", res)
+		klog.V(2).InfoS("change in deleted object does not affect any placement", "obj", res)
 		return ctrl.Result{}, nil
 	}
 	for _, crp := range matchedCrps {
-		klog.V(3).InfoS("Change in deleted object triggered placement reconcile", "obj", res, "crp", crp)
+		klog.V(2).InfoS("Change in deleted object triggered placement reconcile", "obj", res, "crp", crp)
 		r.PlacementController.Enqueue(crp)
 	}
 	return ctrl.Result{}, nil
diff --git a/pkg/controllers/work/apply_controller.go b/pkg/controllers/work/apply_controller.go
index 8af9f4625..1db3c05b1 100644
--- a/pkg/controllers/work/apply_controller.go
+++ b/pkg/controllers/work/apply_controller.go
@@ -154,7 +154,7 @@ func (r *ApplyWorkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	} else if len(staleRes) > 0 {
 		klog.V(2).InfoS("successfully garbage-collected all stale manifests", work.Kind, logObjRef, "number of GCed res", len(staleRes))
 		for _, res := range staleRes {
-			klog.V(5).InfoS("successfully garbage-collected a stale manifest", work.Kind, logObjRef, "res", res)
+			klog.V(2).InfoS("successfully garbage-collected a stale manifest", work.Kind, logObjRef, "res", res)
 		}
 	}
 
@@ -367,7 +367,7 @@ func (r *ApplyWorkReconciler) patchCurrentResource(ctx context.Context, gvr sche
 		Name:      manifestObj.GetName(),
 		Namespace: manifestObj.GetNamespace(),
 	}
-	klog.V(5).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef,
+	klog.V(2).InfoS("manifest is modified", "gvr", gvr, "manifest", manifestRef,
 		"new hash", manifestObj.GetAnnotations()[manifestHashAnnotation],
 		"existing hash", curObj.GetAnnotations()[manifestHashAnnotation])
 
diff --git a/pkg/resourcewatcher/event_handlers.go b/pkg/resourcewatcher/event_handlers.go
index da8b82238..496bcd21f 100644
--- a/pkg/resourcewatcher/event_handlers.go
+++ b/pkg/resourcewatcher/event_handlers.go
@@ -42,7 +42,7 @@ func handleTombStoneObj(obj interface{}) (client.Object, error) {
 
 // onClusterResourcePlacementAdded handles object add event and push the placement to the cluster placement queue.
 func (d *ChangeDetector) onClusterResourcePlacementAdded(obj interface{}) {
 	placementMeta, _ := meta.Accessor(obj)
-	klog.V(4).InfoS("ClusterResourcePlacement Added", "placement", klog.KObj(placementMeta))
+	klog.V(3).InfoS("ClusterResourcePlacement Added", "placement", klog.KObj(placementMeta))
 	d.ClusterResourcePlacementController.Enqueue(obj)
 }
@@ -51,11 +51,11 @@ func (d *ChangeDetector) onClusterResourcePlacementUpdated(oldObj, newObj interf
 	oldPlacementMeta, _ := meta.Accessor(oldObj)
 	newPlacementMeta, _ := meta.Accessor(newObj)
 	if oldPlacementMeta.GetGeneration() == newPlacementMeta.GetGeneration() {
-		klog.V(5).InfoS("ignore a cluster resource placement update event with no spec change",
+		klog.V(4).InfoS("ignore a cluster resource placement update event with no spec change",
 			"placement", klog.KObj(oldPlacementMeta))
 		return
 	}
-	klog.V(4).InfoS("ClusterResourcePlacement Updated",
+	klog.V(3).InfoS("ClusterResourcePlacement Updated",
 		"placement", klog.KObj(oldPlacementMeta))
 	d.ClusterResourcePlacementController.Enqueue(newObj)
 }
@@ -66,7 +66,7 @@ func (d *ChangeDetector) onClusterResourcePlacementDeleted(obj interface{}) {
 	if err != nil {
 		klog.ErrorS(err, "failed to handle a cluster resource placement object delete event")
 	}
-	klog.V(4).InfoS("a clusterResourcePlacement is deleted", "placement", klog.KObj(clientObj))
+	klog.V(3).InfoS("a clusterResourcePlacement is deleted", "placement", klog.KObj(clientObj))
 	d.ClusterResourcePlacementController.Enqueue(clientObj)
 }
 
@@ -80,7 +80,7 @@ func (d *ChangeDetector) onWorkUpdated(oldObj, newObj interface{}) {
 	}
 	// we never change the placement label of a work
 	if placementName, exist := oldWorkMeta.GetLabels()[utils.LabelWorkPlacementName]; exist {
-		klog.V(4).InfoS("a work object is updated, will enqueue a placement event", "work", klog.KObj(oldWorkMeta), "placement", placementName)
+		klog.V(3).InfoS("a work object is updated, will enqueue a placement event", "work", klog.KObj(oldWorkMeta), "placement", placementName)
 		// the meta key function handles string
 		d.ClusterResourcePlacementController.Enqueue(placementName)
 	} else {
@@ -96,7 +96,7 @@ func (d *ChangeDetector) onWorkDeleted(obj interface{}) {
 		return
 	}
 	if placementName, exist := clientObj.GetLabels()[utils.LabelWorkPlacementName]; exist {
-		klog.V(4).InfoS("a work object is deleted", "work", klog.KObj(clientObj), "placement", placementName)
+		klog.V(3).InfoS("a work object is deleted", "work", klog.KObj(clientObj), "placement", placementName)
 		// the meta key function handles string
 		d.ClusterResourcePlacementController.Enqueue(placementName)
 	} else {
@@ -123,12 +123,12 @@ func (d *ChangeDetector) onMemberClusterUpdated(oldObj, newObj interface{}) {
 	if oldMC.GetGeneration() == newMC.GetGeneration() &&
 		reflect.DeepEqual(oldMC.GetLabels(), newMC.GetLabels()) &&
 		reflect.DeepEqual(oldMC.Status.Conditions, newMC.Status.Conditions) {
-		klog.V(5).InfoS("ignore a memberCluster update event with no real change",
+		klog.V(4).InfoS("ignore a memberCluster update event with no real change",
 			"memberCluster", klog.KObj(&oldMC), "generation", oldMC.GetGeneration())
 		return
 	}
 
-	klog.V(4).InfoS("a memberCluster is updated", "memberCluster", klog.KObj(&oldMC))
+	klog.V(3).InfoS("a memberCluster is updated", "memberCluster", klog.KObj(&oldMC))
 	d.MemberClusterPlacementController.Enqueue(oldObj)
 }
 
@@ -145,7 +145,7 @@ func (d *ChangeDetector) onResourceAdded(obj interface{}) {
 		klog.ErrorS(err, "skip process an unknown obj", "gvk", runtimeObject.GetObjectKind().GroupVersionKind().String())
 		return
 	}
-	klog.V(5).InfoS("A resource is added", "obj", klog.KObj(metaInfo),
+	klog.V(3).InfoS("A resource is added", "obj", klog.KObj(metaInfo),
 		"gvk", runtimeObject.GetObjectKind().GroupVersionKind().String())
 	d.ResourceChangeController.Enqueue(obj)
 }
@@ -168,12 +168,12 @@ func (d *ChangeDetector) onResourceUpdated(oldObj, newObj interface{}) {
 		return
 	}
 	if oldObjMeta.GetResourceVersion() != newObjMeta.GetResourceVersion() {
-		klog.V(5).InfoS("A resource is updated", "obj", oldObjMeta.GetName(),
+		klog.V(3).InfoS("A resource is updated", "obj", oldObjMeta.GetName(),
 			"namespace", oldObjMeta.GetNamespace(), "gvk", runtimeObject.GetObjectKind().GroupVersionKind().String())
 		d.ResourceChangeController.Enqueue(newObj)
 		return
 	}
-	klog.V(5).InfoS("Received a resource updated event with no change", "obj", oldObjMeta.GetName(),
+	klog.V(4).InfoS("Received a resource updated event with no change", "obj", oldObjMeta.GetName(),
 		"namespace", oldObjMeta.GetNamespace(), "gvk", runtimeObject.GetObjectKind().GroupVersionKind().String())
 }
 
@@ -184,6 +184,6 @@ func (d *ChangeDetector) onResourceDeleted(obj interface{}) {
 		klog.ErrorS(err, "failed to handle an object delete event")
 		return
 	}
-	klog.V(5).InfoS("A resource is deleted", "obj", klog.KObj(clientObj), "gvk", clientObj.GetObjectKind().GroupVersionKind().String())
+	klog.V(3).InfoS("A resource is deleted", "obj", klog.KObj(clientObj), "gvk", clientObj.GetObjectKind().GroupVersionKind().String())
 	d.ResourceChangeController.Enqueue(clientObj)
 }
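
Context for this change set: every hunk above touches only the level passed to klog.V. A call such as klog.V(2).InfoS(...) is emitted only when the process runs with -v set to 2 or higher, so lowering calls from V(4)/V(5) to V(2)/V(3) surfaces them at the verbosity operators more commonly run with. A minimal, self-contained sketch of that gating follows; the -v value is an assumption chosen for illustration, not a setting taken from this repo.

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)    // register -v and the other klog flags
	_ = flag.Set("v", "2") // emulate an agent started with -v=2
	flag.Parse()
	defer klog.Flush()

	// Emitted: the call's level (2) is <= the configured verbosity (2).
	klog.V(2).InfoS("FetchToken start")

	// Suppressed: level 5 > configured verbosity. This is how the calls
	// above behaved before they were lowered from V(5) to V(2).
	klog.V(5).InfoS("GetToken start", "credential", "redacted")
}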