Move cluster-specific code out of the manager
=============================================

## Motivation

Today, it is already possible to use controller-runtime to build controllers that act on
more than one cluster. However, this is undocumented and not straightforward, requiring
users to dig into the implementation details to figure out how to make it work.

## Goals

* Provide an easy-to-discover way to build controllers that act on multiple clusters
* Decouple the management of `Runnables` from the construction of "things that require a kubeconfig"
* Introduce no changes for users who build controllers that act on a single cluster only

## Non-Goals

## Proposal

Currently, the `./pkg/manager.Manager` serves two purposes:

* Running controllers and other runnables and managing their lifecycle
* Setting up various things to interact with the Kubernetes cluster,
  for example a `Client` and a `Cache`

This works very well when building controllers that talk to a single cluster,
but some use cases require controllers that interact with more than one
cluster. This multi-cluster use case is very awkward today, because it
requires constructing one manager per cluster and adding all subsequent
managers to the first one, as the sketch below illustrates.
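
For illustration, here is a minimal sketch of that status-quo workaround. It
works because `Manager` itself implements `Runnable`, so one manager can be
added to another; `cfg1` and `cfg2` are assumed to be `*rest.Config` values.

```go
// Status-quo workaround (sketch): one manager per cluster, with the second
// manager added to the first as a Runnable so both are started together.
mgr1, err := manager.New(cfg1, manager.Options{})
if err != nil {
	panic(err)
}
mgr2, err := manager.New(cfg2, manager.Options{})
if err != nil {
	panic(err)
}
// Manager implements Runnable, so it can be added to another manager.
if err := mgr1.Add(mgr2); err != nil {
	panic(err)
}
```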

This document proposes to move all cluster-specific code out of the manager
and into a new package and interface that then gets embedded into the manager.
This keeps the usage for single-cluster cases the same and introduces the
change in a backwards-compatible manner.

Furthermore, the manager gets extended to start all caches before any other
`Runnables` are started.

The new `ClusterConnector` interface will look like this:

```go
type ClusterConnector interface {
	// SetFields will set cluster-specific dependencies on an object for which the object has implemented the inject
	// interface, specifically inject.Client, inject.Cache, inject.Scheme, inject.Config and inject.APIReader.
	SetFields(interface{}) error

	// GetConfig returns an initialized Config.
	GetConfig() *rest.Config

	// GetClient returns a client configured with the Config. This client may
	// not be a fully "direct" client -- it may read from a cache, for
	// instance. See Options.NewClient for more information on how the default
	// implementation works.
	GetClient() client.Client

	// GetFieldIndexer returns a client.FieldIndexer configured with the client.
	GetFieldIndexer() client.FieldIndexer

	// GetCache returns a cache.Cache.
	GetCache() cache.Cache

	// GetEventRecorderFor returns a new EventRecorder for the provided name.
	GetEventRecorderFor(name string) record.EventRecorder

	// GetRESTMapper returns a RESTMapper.
	GetRESTMapper() meta.RESTMapper

	// GetAPIReader returns a reader that will be configured to use the API server.
	// This should be used sparingly and only when the client does not fit your
	// use case.
	GetAPIReader() client.Reader

	// GetScheme returns an initialized Scheme.
	GetScheme() *runtime.Scheme

	// Start starts the ClusterConnector.
	Start(<-chan struct{}) error
}
```
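
To make the `SetFields` contract concrete, here is a minimal sketch of a type
that would receive a cluster-scoped client through controller-runtime's
existing `inject.Client` interface; the `secretLister` type is hypothetical:

```go
// secretLister is a hypothetical Runnable that wants a client for one
// specific cluster. Implementing inject.Client lets the ClusterConnector's
// SetFields hand it that cluster's client.
type secretLister struct {
	client client.Client
}

// InjectClient implements inject.Client and is called by SetFields.
func (s *secretLister) InjectClient(c client.Client) error {
	s.client = c
	return nil
}
```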

And the current `Manager` interface will change to look like this:

```go
type Manager interface {
	// ClusterConnector holds objects to connect to a cluster.
	clusterconnector.ClusterConnector

	// Add will set requested dependencies on the component, and cause the component to be
	// started when Start is called. Add will inject any dependencies for which the argument
	// implements the inject interface - e.g. inject.Client.
	// Depending on if a Runnable implements the LeaderElectionRunnable interface, a Runnable can be run in either
	// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
	Add(Runnable) error

	// Elected is closed when this manager is elected leader of a group of
	// managers, either because it won a leader election or because no leader
	// election was configured.
	Elected() <-chan struct{}

	// SetFields will set any dependencies on an object for which the object has implemented the inject
	// interface - e.g. inject.Client.
	SetFields(interface{}) error

	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
	// Might be useful to register some diagnostic endpoints, e.g. pprof. Note that these endpoints are meant to be
	// sensitive and shouldn't be exposed publicly.
	// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
	// a Runnable to the manager via the Add method.
	AddMetricsExtraHandler(path string, handler http.Handler) error

	// AddHealthzCheck allows you to add a Healthz checker.
	AddHealthzCheck(name string, check healthz.Checker) error

	// AddReadyzCheck allows you to add a Readyz checker.
	AddReadyzCheck(name string, check healthz.Checker) error

	// Start starts all registered Controllers and blocks until the Stop channel is closed.
	// Returns an error if there is an error starting any controller.
	// If LeaderElection is used, the binary must be exited immediately after this returns,
	// otherwise components that need leader election might continue to run after the leader
	// lock was lost.
	Start(<-chan struct{}) error

	// GetWebhookServer returns a webhook.Server.
	GetWebhookServer() *webhook.Server
}
```
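
Because `Manager` embeds `clusterconnector.ClusterConnector`, existing
single-cluster code that calls, for example, `mgr.GetClient()` or
`mgr.GetScheme()` keeps compiling and behaving as before.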

Additionally, we will export a new `MultiClusterManager` that is intended to be used for
multi-cluster controllers:

```go
type MultiClusterManager interface {
	Manager
	// AddCluster adds another named cluster to the MultiClusterManager. The name
	// must be unique. The MultiClusterManager will wait for all clusters'
	// caches to be started before starting anything else.
	AddCluster(config *rest.Config, name string) error
	// GetClusters returns all Clusters this MultiClusterManager knows about.
	// The cluster used to construct the MultiClusterManager is named `primary`
	// and will be used for LeaderElection, if enabled.
	GetClusters() map[string]clusterconnector.ClusterConnector
}
```
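
A hypothetical usage sketch follows; the `manager.NewMultiCluster` constructor
name is an assumption on our part, since this proposal does not pin down how
the `MultiClusterManager` is constructed:

```go
// Hypothetical wiring: the config passed to the constructor becomes the
// `primary` cluster; additional clusters are registered by name.
mcm, err := manager.NewMultiCluster(primaryConfig, manager.Options{}) // assumed constructor
if err != nil {
	panic(err)
}
if err := mcm.AddCluster(mirrorConfig, "mirror"); err != nil {
	panic(err)
}
mirror := mcm.GetClusters()["mirror"]
_ = mirror.GetClient() // client scoped to the mirror cluster
```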

Furthermore, during startup, the `Manager` will use a type assertion to find `ClusterConnector`s
so that it can start their caches before anything else:

```go
if cc, isClusterConnector := runnable.(clusterconnector.ClusterConnector); isClusterConnector {
	m.caches = append(m.caches, cc.GetCache())
}
```

Then, when the manager is started, it first starts all collected caches, waits
for them to sync, and only afterwards starts the remaining runnables:

```go
for idx := range cm.caches {
	go func(idx int) { cm.caches[idx].Start(cm.internalStop) }(idx)
}

for _, cache := range cm.caches {
	cache.WaitForCacheSync(cm.internalStop)
}

// Start all other runnables
```
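
Since `WaitForCacheSync` reports whether syncing succeeded, a slightly more
defensive variant of the sketch above (our suggestion, not part of the
proposal text) would surface sync failures instead of proceeding:

```go
// Sketch: abort startup if any cache fails to sync, rather than reconciling
// against a half-populated view of a cluster.
for _, c := range cm.caches {
	if synced := c.WaitForCacheSync(cm.internalStop); !synced {
		return fmt.Errorf("failed to wait for caches to sync")
	}
}
```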

## Example

Below is a sample `reconciler` that creates a secret in a `mirrorCluster` for each
secret found in a `referenceCluster`, if no secret of that name exists there yet. To keep
the sample short, it does not compare the contents of the secrets.

```go
type secretMirrorReconciler struct {
	referenceClusterClient, mirrorClusterClient client.Client
}

func (r *secretMirrorReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
	s := &corev1.Secret{}
	if err := r.referenceClusterClient.Get(context.TODO(), req.NamespacedName, s); err != nil {
		if kerrors.IsNotFound(err) {
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}

	if err := r.mirrorClusterClient.Get(context.TODO(), req.NamespacedName, &corev1.Secret{}); err != nil {
		if !kerrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// The secret does not exist in the mirror cluster yet, so create it there.
		mirrorSecret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Namespace: s.Namespace, Name: s.Name},
			Data:       s.Data,
		}
		return reconcile.Result{}, r.mirrorClusterClient.Create(context.TODO(), mirrorSecret)
	}

	return reconcile.Result{}, nil
}

func NewSecretMirrorReconciler(mgr manager.Manager, mirrorConnector clusterconnector.ClusterConnector) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch Secrets in the reference cluster
		For(&corev1.Secret{}).
		// Watch Secrets in the mirror cluster
		Watches(
			source.NewKindWithCache(&corev1.Secret{}, mirrorConnector.GetCache()),
			&handler.EnqueueRequestForObject{},
		).
		Complete(&secretMirrorReconciler{
			referenceClusterClient: mgr.GetClient(),
			mirrorClusterClient:    mirrorConnector.GetClient(),
		})
}

func main() {
	mgr, err := manager.New(cfg1, manager.Options{})
	if err != nil {
		panic(err)
	}

	mirrorClusterConnector, err := clusterconnector.New(cfg2)
	if err != nil {
		panic(err)
	}

	if err := mgr.Add(mirrorClusterConnector); err != nil {
		panic(err)
	}

	if err := NewSecretMirrorReconciler(mgr, mirrorClusterConnector); err != nil {
		panic(err)
	}

	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```
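
At startup, the manager recognizes the added `ClusterConnector` via the type
assertion described above, so the mirror cluster's cache is started and synced
before the controller begins reconciling.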