Skip to content

Commit 2d144d1

Browse files
committed
fixup
1 parent c1896b2 commit 2d144d1

File tree

16 files changed

+31
-32
lines changed

16 files changed

+31
-32
lines changed

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -113,7 +113,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
113113
r.TokenTTL = DefaultTokenTTL
114114
}
115115

116-
tr := tlog.Reconciler(r, "controllers.KubeadmConfigReconciler", "kubeadmconfig")
116+
tr := tlog.Reconciler(r)
117117
b := ctrl.NewControllerManagedBy(mgr).
118118
For(&bootstrapv1.KubeadmConfig{}).
119119
WithOptions(options).

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -79,7 +79,7 @@ type KubeadmControlPlaneReconciler struct {
7979
}
8080

8181
func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
82-
tr := tlog.Reconciler(r, "controllers.KubeadmControlPlaneReconciler", "kubeadmcontrolplane")
82+
tr := tlog.Reconciler(r)
8383
c, err := ctrl.NewControllerManagedBy(mgr).
8484
For(&controlplanev1.KubeadmControlPlane{}).
8585
Owns(&clusterv1.Machine{}).
@@ -253,6 +253,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc
253253
// reconcile handles KubeadmControlPlane reconciliation.
254254
func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) {
255255
log := ctrl.LoggerFrom(ctx)
256+
log.Info("Reconcile KubeadmControlPlane")
256257

257258
// Make sure to reconcile the external infrastructure reference.
258259
if err := r.reconcileExternalReference(ctx, cluster, &kcp.Spec.MachineTemplate.InfrastructureRef); err != nil {

hack/observability/promtail/values.yaml

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@ config:
66

77
snippets:
88
pipelineStages:
9+
# Parse cluster and machine to make them available as labels.
910
- cri: { }
1011
- json:
1112
expressions:

internal/controllers/cluster/cluster_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -76,7 +76,7 @@ type Reconciler struct {
7676
}
7777

7878
func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
79-
tr := tlog.Reconciler(r, "controllers.ClusterReconciler", "cluster")
79+
tr := tlog.Reconciler(r)
8080
controller, err := ctrl.NewControllerManagedBy(mgr).
8181
For(&clusterv1.Cluster{}).
8282
Watches(

internal/controllers/clusterclass/clusterclass_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -57,7 +57,7 @@ type Reconciler struct {
5757
}
5858

5959
func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
60-
tr := tlog.Reconciler(r, "controllers.ClusterClassReconciler", "clusterclass")
60+
tr := tlog.Reconciler(r)
6161
err := ctrl.NewControllerManagedBy(mgr).
6262
For(&clusterv1.ClusterClass{}).
6363
Named("clusterclass").

internal/controllers/machine/machine_controller.go

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -103,7 +103,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
103103
r.nodeDeletionRetryTimeout = 10 * time.Second
104104
}
105105

106-
tr := log.Reconciler(r, "controllers.MachineReconciler", "machine")
106+
tr := log.Reconciler(r)
107107
controller, err := ctrl.NewControllerManagedBy(mgr).
108108
For(&clusterv1.Machine{}).
109109
WithOptions(options).
@@ -297,7 +297,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
297297
if err != nil {
298298
switch err {
299299
case errNoControlPlaneNodes, errLastControlPlaneNode, errNilNodeRef, errClusterIsBeingDeleted, errControlPlaneIsBeingDeleted:
300-
log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", m.Status.NodeRef, "cause", err.Error())
300+
log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", klog.KRef("", m.Status.NodeRef.Name), "cause", err.Error())
301301
default:
302302
return ctrl.Result{}, errors.Wrapf(err, "failed to check if Kubernetes Node deletion is allowed")
303303
}
@@ -319,7 +319,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
319319
return ctrl.Result{}, err
320320
}
321321

322-
log.Info("Draining node", "node", m.Status.NodeRef.Name)
322+
log.Info("Draining node", "node", klog.KRef("", m.Status.NodeRef.Name))
323323
// The DrainingSucceededCondition never exists before the node is drained for the first time,
324324
// so its transition time can be used to record the first time draining.
325325
// This `if` condition prevents the transition time to be changed more than once.
@@ -351,7 +351,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
351351
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error wait for volume detach, node %q: %v", m.Status.NodeRef.Name, err)
352352
return ctrl.Result{}, err
353353
}
354-
log.Info("Waiting for node volumes to be detached", "node", m.Status.NodeRef.Name)
354+
log.Info("Waiting for node volumes to be detached", "node", klog.KRef("", m.Status.NodeRef.Name))
355355
return ctrl.Result{}, nil
356356
}
357357
conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition)
@@ -391,7 +391,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
391391
// We only delete the node after the underlying infrastructure is gone.
392392
// https://github.com/kubernetes-sigs/cluster-api/issues/2565
393393
if isDeleteNodeAllowed {
394-
log.Info("Deleting node", "node", m.Status.NodeRef.Name)
394+
log.Info("Deleting node", "node", klog.KRef("", m.Status.NodeRef.Name))
395395

396396
var deleteNodeErr error
397397
waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) {
@@ -401,7 +401,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
401401
return true, nil
402402
})
403403
if waitErr != nil {
404-
log.Error(deleteNodeErr, "Timed out deleting node", "node", m.Status.NodeRef.Name)
404+
log.Error(deleteNodeErr, "Timed out deleting node", "node", klog.KRef("", m.Status.NodeRef.Name))
405405
conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "")
406406
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr)
407407

@@ -449,7 +449,7 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool {
449449
// isDeleteNodeAllowed returns nil only if the Machine's NodeRef is not nil
450450
// and if the Machine is not the last control plane node in the cluster.
451451
func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
452-
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster).String())
452+
log := ctrl.LoggerFrom(ctx)
453453
// Return early if the cluster is being deleted.
454454
if !cluster.DeletionTimestamp.IsZero() {
455455
return errClusterIsBeingDeleted
@@ -509,7 +509,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1
509509
}
510510

511511
func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) {
512-
log := ctrl.LoggerFrom(ctx, "node", nodeName)
512+
log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName))
513513

514514
restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster))
515515
if err != nil {
@@ -581,7 +581,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
581581
// because if the node is deleted before detach succeeds, then the underlying VMDK will be deleted together with the Machine
582582
// so after node draining we need to check if all volumes are detached before deleting the node.
583583
func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) {
584-
log := ctrl.LoggerFrom(ctx, "node", nodeName)
584+
log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName))
585585

586586
remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
587587
if err != nil {

internal/controllers/machine/machine_controller_noderef.go

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,7 @@ import (
2323
"github.com/pkg/errors"
2424
corev1 "k8s.io/api/core/v1"
2525
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
26+
"k8s.io/klog/v2"
2627
ctrl "sigs.k8s.io/controller-runtime"
2728
"sigs.k8s.io/controller-runtime/pkg/client"
2829

@@ -188,7 +189,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n
188189
for key, node := range nl.Items {
189190
nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID)
190191
if err != nil {
191-
log.Error(err, "Failed to parse ProviderID", "node", client.ObjectKeyFromObject(&nl.Items[key]).String())
192+
log.Error(err, "Failed to parse ProviderID", "node", klog.KRef("", nl.Items[key].GetName()).String())
192193
continue
193194
}
194195

internal/controllers/machinedeployment/machinedeployment_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -74,7 +74,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
7474
return err
7575
}
7676

77-
tr := tlog.Reconciler(r, "controllers.MachineDeploymentReconciler", "machinedeployment")
77+
tr := tlog.Reconciler(r)
7878
c, err := ctrl.NewControllerManagedBy(mgr).
7979
For(&clusterv1.MachineDeployment{}).
8080
Owns(&clusterv1.MachineSet{}).

internal/controllers/machinehealthcheck/machinehealthcheck_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -86,7 +86,7 @@ type Reconciler struct {
8686
}
8787

8888
func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
89-
tr := tlog.Reconciler(r, "controllers.MachineHealthCheck", "machinehealthcheck")
89+
tr := tlog.Reconciler(r)
9090
controller, err := ctrl.NewControllerManagedBy(mgr).
9191
For(&clusterv1.MachineHealthCheck{}).
9292
Watches(

internal/controllers/machineset/machineset_controller.go

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -87,7 +87,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
8787
return err
8888
}
8989

90-
tr := tlog.Reconciler(r, "controllers.MachineSetReconciler", "machineset")
90+
tr := tlog.Reconciler(r)
9191
c, err := ctrl.NewControllerManagedBy(mgr).
9292
For(&clusterv1.MachineSet{}).
9393
Owns(&clusterv1.Machine{}).

0 commit comments

Comments (0)