
Commit de2a5e1

Fix up additional log lines
Signed-off-by: killianmuldoon <[email protected]>
1 parent 35d4dd7 commit de2a5e1

25 files changed, +74 -62 lines changed

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go

Lines changed: 1 addition & 2 deletions
@@ -191,8 +191,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, err
 	}
 
-	log = log.WithValues("Cluster", klog.KObj(cluster))
-	ctx = ctrl.LoggerInto(ctx, log)
+	ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))
 
 	if annotations.IsPaused(cluster, config) {
 		log.Info("Reconciliation is paused for this object")

cmd/clusterctl/client/cluster/mover.go

Lines changed: 1 addition & 1 deletion
@@ -540,7 +540,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err
 	setClusterPauseBackoff := newWriteBackoff()
 	for i := range clusters {
 		cluster := clusters[i]
-		log.V(5).Info("Set Cluster.Spec.Paused", "Paused", value, "Cluster", cluster.identity.Name, "Namespace", cluster.identity.Namespace)
+		log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "cluster", cluster.identity.Name, "namespace", cluster.identity.Namespace)
 
 		// Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions.
 		if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error {

controllers/remote/cluster_cache.go

Lines changed: 6 additions & 6 deletions
@@ -236,11 +236,11 @@ func (t *ClusterCacheTracker) deleteAccessor(cluster client.ObjectKey) {
 		return
 	}
 
-	t.log.V(2).Info("Deleting clusterAccessor", "Cluster", cluster.String())
-
-	t.log.V(4).Info("Stopping cache", "Cluster", cluster.String())
+	t.log.WithValues("cluster", klog.KRef(cluster.Namespace, cluster.Name))
+	t.log.V(2).Info("Deleting clusterAccessor")
+	t.log.V(4).Info("Stopping cache")
 	a.cache.Stop()
-	t.log.V(4).Info("Cache stopped", "Cluster", cluster.String())
+	t.log.V(4).Info("Cache stopped")
 
 	delete(t.clusterAccessors, cluster)
 }
@@ -287,7 +287,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
 	}
 
 	if a.watches.Has(input.Name) {
-		t.log.V(6).Info("Watch already exists", "Namespace", klog.KRef(input.Cluster.Namespace, ""), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name)
+		t.log.V(6).Info("Watch already exists", "namespace", klog.KRef(input.Cluster.Namespace, ""), "cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name)
 		return nil
 	}
 
@@ -392,7 +392,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
 	// NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case
 	// happens when the cache is explicitly stopped.
 	if err != nil && err != wait.ErrWaitTimeout {
-		t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
+		t.log.Error(err, "Error health checking cluster", "cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
 		t.deleteAccessor(in.cluster)
 	}
 }
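
One detail worth noting when reading the deleteAccessor hunk: logr's WithValues returns a derived logger rather than mutating its receiver, so the derived logger is normally assigned and reused for the following entries. A small sketch of that usual pattern, reusing the t.log field and "cluster" key from above (illustrative only, not the code of this commit):

// Illustrative only: keep the derived logger and reuse it, so every entry
// below carries the "cluster" key.
log := t.log.WithValues("cluster", klog.KRef(cluster.Namespace, cluster.Name))
log.V(2).Info("Deleting clusterAccessor")
log.V(4).Info("Stopping cache")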

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 2 additions & 2 deletions
@@ -143,7 +143,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 		log.Info("Cluster Controller has not yet set OwnerRef")
 		return ctrl.Result{}, nil
 	}
-	log = log.WithValues("Cluster", klog.KObj(cluster))
+	log = log.WithValues("cluster", klog.KObj(cluster))
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	if annotations.IsPaused(cluster, kcp) {
@@ -463,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	var errs []error
 	for i := range machinesToDelete {
 		m := machinesToDelete[i]
-		logger := log.WithValues("Machine", klog.KObj(m))
+		logger := log.WithValues("machine", klog.KObj(m))
 		if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "Failed to cleanup owned machine")
 			errs = append(errs, err)

controlplane/kubeadm/internal/controllers/remediation.go

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
 	if err := patchHelper.Patch(ctx, machineToBeRemediated, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
 		clusterv1.MachineOwnerRemediatedCondition,
 	}}); err != nil {
-		log.Error(err, "Failed to patch control plane Machine", "Machine", machineToBeRemediated.Name)
+		log.Error(err, "Failed to patch control plane Machine", "machine", machineToBeRemediated.Name)
 		if retErr == nil {
 			retErr = errors.Wrapf(err, "failed to patch control plane Machine %s", machineToBeRemediated.Name)
 		}

controlplane/kubeadm/internal/controllers/scale.go

Lines changed: 2 additions & 2 deletions
@@ -141,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 		return ctrl.Result{}, err
 	}
 
-	logger = logger.WithValues("Machine", klog.KObj(machineToDelete))
+	logger = logger.WithValues("machine", klog.KObj(machineToDelete))
 	if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
 		logger.Error(err, "Failed to delete control plane machine")
 		r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
@@ -201,7 +201,7 @@ loopmachines:
 		}
 
 		for _, condition := range allMachineHealthConditions {
-			if err := preflightCheckCondition("Machine", machine, condition); err != nil {
+			if err := preflightCheckCondition("machine", machine, condition); err != nil {
 				machineErrors = append(machineErrors, err)
 			}
 		}

controlplane/kubeadm/internal/controllers/status.go

Lines changed: 2 additions & 1 deletion
@@ -20,6 +20,7 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -33,7 +34,7 @@ import (
 // updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the
 // resource status subresourcs up-to-date.
 func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error {
-	log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
 
 	selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
 	// Copy label selector to its status counterpart in string format.
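
The switch from cluster.Name to klog.KObj(cluster) in updateStatus changes what the structured value contains: a bare name string versus a namespace/name object reference. A small illustrative comparison, not taken from the commit (the clusterRef key is hypothetical and only added to show both forms side by side):

// Illustrative comparison only.
log := ctrl.LoggerFrom(ctx,
	"cluster", cluster.Name,          // value is just the name, e.g. "my-cluster"
	"clusterRef", klog.KObj(cluster), // value renders as "namespace/name", e.g. "default/my-cluster"
)
log.Info("Updating KubeadmControlPlane status")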

exp/addons/internal/controllers/clusterresourceset_controller.go

Lines changed: 1 addition & 1 deletion
@@ -236,7 +236,7 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c
 // It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources.
 // TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ?
 func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error {
-	log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
 
 	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
 	if err != nil {

exp/internal/controllers/machinepool_controller_noderef.go

Lines changed: 3 additions & 2 deletions
@@ -23,6 +23,7 @@ import (
 
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -47,7 +48,7 @@ type getNodeReferencesResult struct {
 }
 
 func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
 	// Check that the MachinePool hasn't been deleted or in the process.
 	if !mp.DeletionTimestamp.IsZero() {
 		return ctrl.Result{}, nil
@@ -65,7 +66,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
 		return ctrl.Result{}, nil
 	}
 
-	log = log.WithValues("Cluster", cluster.Name)
+	log = log.WithValues("cluster", klog.KObj(cluster))
 
 	// Check that the MachinePool has valid ProviderIDList.
 	if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) {

exp/internal/controllers/machinepool_controller_phases.go

Lines changed: 3 additions & 2 deletions
@@ -26,6 +26,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -166,7 +167,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *
 
 // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
 func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
 
 	// Call generic external reconciler if we have an external reference.
 	var bootstrapConfig *unstructured.Unstructured
@@ -226,7 +227,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster
 
 // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
 func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
+	log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
 
 	// Call generic external reconciler.
 	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
