/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package loadbalancer

import (
	"github.com/go-logr/logr"
	"github.com/pkg/errors"

	"k8s.io/client-go/tools/record"
	clusterutilv1 "sigs.k8s.io/cluster-api/util"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha2"
	"sigs.k8s.io/cluster-api-provider-vsphere/api/v1alpha2/loadbalancer"
	"sigs.k8s.io/cluster-api-provider-vsphere/controllers"
	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/config"
	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/context"
	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services"
	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/services/loadbalancer/aws"
	infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/util"
)

// providerFunc constructs a load balancer service implementation from the
// cluster's load balancer configuration.
type providerFunc func(config *loadbalancer.Config, client client.Client, logger logr.Logger) (services.LoadBalancerService, error)

// providerFactory maps a provider name to the constructor for its
// implementation.
var providerFactory = map[string]providerFunc{loadbalancer.AwsProvider: aws.NewProvider}
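
// Registering another backend is a one-line change to providerFactory. For
// example, a hypothetical HAProxy integration (the provider name and package
// below are illustrative, not part of this repository) would be wired as:
//
//	providerFactory[loadbalancer.HAProxyProvider] = haproxy.NewProvider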

// Reconciler reconciles the load balancer fronting a cluster's control plane.
type Reconciler struct {
	client.Client
	ProviderName string
	Recorder     record.EventRecorder
	Log          logr.Logger
}

// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vsphereclusters,verbs=get;list;watch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vsphereclusters/status,verbs=get;update;patch
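
// The markers above are consumed by controller-gen to generate this
// controller's RBAC rules. A sketch of the ClusterRole rule produced by the
// first marker (generated into the project's role manifest, not part of this
// file):
//
//	- apiGroups: ["infrastructure.cluster.x-k8s.io"]
//	  resources: ["vsphereclusters"]
//	  verbs: ["get", "list", "watch"]
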
// Reconcile ensures the back-end state reflects the Kubernetes state intent
// for a VSphereCluster's load balancer.
func (r *Reconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
	logger := r.Log.WithName("loadBalancer")

	ctx, result, err := controllers.GetClusterContext(req, logger, r)
	if err != nil {
		return result, err
	}
	vsphereCluster := ctx.VSphereCluster
	// If the cluster carries no load balancer configuration there is nothing
	// for this controller to do.
	if vsphereCluster.Spec.LoadBalancerConfiguration == nil {
		return ctrl.Result{}, nil
	}

	// Always close the context when exiting this function so we can persist
	// any VSphereCluster changes.
	defer func() {
		if err := ctx.Patch(); err != nil && reterr == nil {
			reterr = err
		}
	}()

	// Handle deleted clusters.
	if !vsphereCluster.DeletionTimestamp.IsZero() {
		return r.reconcileDelete(ctx)
	}

	// Handle non-deleted clusters.
	return r.reconcileNormal(ctx)
}

// newLoadBalancerProvider looks up the factory registered for the configured
// provider name and instantiates the load balancer service for the cluster.
func (r *Reconciler) newLoadBalancerProvider(ctx *context.ClusterContext) (services.LoadBalancerService, error) {
	newProvider, ok := providerFactory[r.ProviderName]
	if !ok {
		err := errors.Errorf("load balancer factory missing for %q", r.ProviderName)
		ctx.Logger.Error(err, "unable to initialize the load balancer provider")
		return nil, err
	}
	return newProvider(ctx.VSphereCluster.Spec.LoadBalancerConfiguration, r.Client, ctx.Logger)
}

// reconcileDelete tears down the load balancer for a cluster being deleted.
func (r *Reconciler) reconcileDelete(ctx *context.ClusterContext) (ctrl.Result, error) {
	vsphereCluster := ctx.VSphereCluster
	loadBalancerProvider, err := r.newLoadBalancerProvider(ctx)
	if err != nil {
		return ctrl.Result{}, err
	}

	if err := loadBalancerProvider.Delete(ctx.Cluster); err != nil {
		return reconcile.Result{RequeueAfter: config.DefaultRequeue}, err
	}
	// The backing load balancer is gone; release the finalizer so the
	// VSphereCluster can be deleted.
	vsphereCluster.Finalizers = clusterutilv1.Filter(vsphereCluster.Finalizers, infrav1.LoadBalancerFinalizer)
	return ctrl.Result{}, nil
}

// reconcileNormal ensures the load balancer exists, fronts the current set of
// control plane machines, and publishes its endpoint on the cluster status.
func (r *Reconciler) reconcileNormal(ctx *context.ClusterContext) (ctrl.Result, error) {
	vsphereCluster := ctx.VSphereCluster
	loadBalancerProvider, err := r.newLoadBalancerProvider(ctx)
	if err != nil {
		return ctrl.Result{}, err
	}
	ctx.Logger.V(4).Info("reconciling loadbalancer")

	machines, err := infrautilv1.GetMachinesInCluster(ctx, r.Client, vsphereCluster.Namespace, vsphereCluster.Name)
	if err != nil {
		return ctrl.Result{}, err
	}
	ctx.Logger.V(6).Info("got machines for cluster while reconciling load balancer")

	controlPlaneMachines := clusterutilv1.GetControlPlaneMachines(machines)
	vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, r.Client, vsphereCluster.Namespace, vsphereCluster.Name)
	if err != nil {
		return ctrl.Result{}, err
	}
	ctx.Logger.V(6).Info("got vsphere machines for cluster while reconciling load balancer")
	controlPlaneIPs := []string{}
	for _, controlPlane := range controlPlaneMachines {
		vsphereMachine, ok := vsphereMachines[controlPlane.Name]
		if !ok {
			ctx.Logger.V(6).Info("machine not yet linked to the cluster", "machine-name", controlPlane.Name)
			continue
		}
		ip, err := infrautilv1.GetMachinePreferredIPAddress(vsphereMachine)
		if err == infrautilv1.ErrParseCIDR {
			return ctrl.Result{}, err
		}
		if err != nil {
			// The machine has no usable IP address yet; skip it rather than
			// registering an empty backend with the load balancer.
			ctx.Logger.V(6).Info("machine has no IP address yet", "machine-name", controlPlane.Name)
			continue
		}
		controlPlaneIPs = append(controlPlaneIPs, ip)
	}
	ctx.Logger.V(4).Info("gathered controlplane IPs", "controlplane-ips", controlPlaneIPs)

	apiEndpoint, err := loadBalancerProvider.Reconcile(ctx.Cluster, controlPlaneIPs)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Add the finalizer only if it is not already present so repeated
	// reconciles do not grow the list.
	if !clusterutilv1.Contains(vsphereCluster.Finalizers, infrav1.LoadBalancerFinalizer) {
		vsphereCluster.Finalizers = append(vsphereCluster.Finalizers, infrav1.LoadBalancerFinalizer)
	}
	// Publish the load balancer endpoint, replacing any previous entry rather
	// than appending a duplicate on every reconcile.
	vsphereCluster.Status.APIEndpoints = []infrav1.APIEndpoint{{
		Host: apiEndpoint.Host,
		Port: apiEndpoint.Port,
	}}
	vsphereCluster.Status.Ready = true
	return ctrl.Result{}, nil
}

// SetupWithManager adds this controller to the provided manager.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1.VSphereCluster{}).
		Complete(r)
}
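
// The reconciler is registered from the manager entry point. A minimal wiring
// sketch (illustrative only; the actual main package, flag handling, and
// logger names in this repository may differ):
//
//	if err := (&loadbalancer.Reconciler{
//		Client:       mgr.GetClient(),
//		ProviderName: loadbalancer.AwsProvider,
//		Recorder:     mgr.GetEventRecorderFor("loadbalancer-controller"),
//		Log:          ctrl.Log.WithName("controllers").WithName("LoadBalancer"),
//	}).SetupWithManager(mgr); err != nil {
//		setupLog.Error(err, "unable to create controller", "controller", "LoadBalancer")
//		os.Exit(1)
//	}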