Move cluster-specific code out of the manager
===================

## Motivation

Today, it is already possible to use controller-runtime to build controllers that act on
more than one cluster. However, this is undocumented and not straightforward, requiring
users to look into the implementation details to figure out how to make this work.

## Goals

* Provide an easy-to-discover way to build controllers that act on multiple clusters
* Decouple the management of `Runnables` from the construction of "things that require a kubeconfig"
* Do not introduce changes for users that build controllers that act on one cluster only

## Non-Goals

## Proposal

Currently, the `./pkg/manager.Manager` has two purposes:

* Running controllers and other runnables and managing their lifecycle
* Setting up various things to interact with the Kubernetes cluster,
  for example a `Client` and a `Cache`

This works very well when building controllers that talk to a single cluster.
However, some use cases require controllers that interact with more than
one cluster. This multi-cluster use case is very awkward today, because it
requires constructing one manager per cluster and adding all subsequent
managers to the first one, roughly as in the sketch below.

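A minimal sketch of that workaround (the config variable names here are hypothetical placeholders):

```go
// Status quo: one manager per cluster. A Manager itself satisfies the
// Runnable interface (it has a Start method), so additional managers
// can be added to the first one, which then starts them.
mgr, err := manager.New(referenceClusterCfg, manager.Options{})
if err != nil {
	panic(err)
}

mirrorMgr, err := manager.New(mirrorClusterCfg, manager.Options{})
if err != nil {
	panic(err)
}

// Add the second manager to the first so that it gets started.
if err := mgr.Add(mirrorMgr); err != nil {
	panic(err)
}
```
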
This document proposes to move all cluster-specific code out of the manager
and into a new package and interface that then gets embedded into the manager.
This keeps the usage for single-cluster cases the same and introduces
the change in a backwards-compatible manner.

Furthermore, the manager gets extended to start all caches before any other
`runnables` are started.

The new `Cluster` interface will look like this:

```go
type Cluster interface {
	// SetFields will set cluster-specific dependencies on an object for which the object has implemented the inject
	// interface, specifically inject.Client, inject.Cache, inject.Scheme, inject.Config and inject.APIReader
	SetFields(interface{}) error

	// GetConfig returns an initialized Config
	GetConfig() *rest.Config

	// GetClient returns a client configured with the Config. This client may
	// not be a fully "direct" client -- it may read from a cache, for
	// instance. See Options.NewClient for more information on how the default
	// implementation works.
	GetClient() client.Client

	// GetFieldIndexer returns a client.FieldIndexer configured with the client
	GetFieldIndexer() client.FieldIndexer

	// GetCache returns a cache.Cache
	GetCache() cache.Cache

	// GetEventRecorderFor returns a new EventRecorder for the provided name
	GetEventRecorderFor(name string) record.EventRecorder

	// GetRESTMapper returns a RESTMapper
	GetRESTMapper() meta.RESTMapper

	// GetAPIReader returns a reader that will be configured to use the API server.
	// This should be used sparingly and only when the client does not fit your
	// use case.
	GetAPIReader() client.Reader

	// GetScheme returns an initialized Scheme
	GetScheme() *runtime.Scheme

	// Start starts the connection to the Cluster
	Start(<-chan struct{}) error
}
```

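To illustrate the `SetFields` contract, a component can receive the cluster's dependencies by implementing the corresponding inject interface. A minimal sketch, assuming the existing `inject.Client` interface; `secretLister` is a made-up type:

```go
// secretLister is a hypothetical component that needs the cluster's client.
type secretLister struct {
	client client.Client
}

// InjectClient implements inject.Client. Cluster.SetFields detects this
// interface on the object it is given and calls it with the cluster's client.
func (s *secretLister) InjectClient(c client.Client) error {
	s.client = c
	return nil
}
```

Calling `c.SetFields(&secretLister{})` on a `Cluster` would then populate the `client` field.
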
And the current `Manager` interface will change to look like this:

```go
type Manager interface {
	// Cluster holds objects to connect to a cluster
	cluster.Cluster

	// Add will set requested dependencies on the component, and cause the component to be
	// started when Start is called. Add will inject any dependencies for which the argument
	// implements the inject interface - e.g. inject.Client.
	// Depending on if a Runnable implements the LeaderElectionRunnable interface, a Runnable can be run in either
	// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
	Add(Runnable) error

	// Elected is closed when this manager is elected leader of a group of
	// managers, either because it won a leader election or because no leader
	// election was configured.
	Elected() <-chan struct{}

	// SetFields will set any dependencies on an object for which the object has implemented the inject
	// interface - e.g. inject.Client.
	SetFields(interface{}) error

	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
	// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints are meant to be
	// sensitive and shouldn't be exposed publicly.
	// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
	// a Runnable to the manager via the Add method.
	AddMetricsExtraHandler(path string, handler http.Handler) error

	// AddHealthzCheck allows you to add a Healthz checker
	AddHealthzCheck(name string, check healthz.Checker) error

	// AddReadyzCheck allows you to add a Readyz checker
	AddReadyzCheck(name string, check healthz.Checker) error

	// Start starts all registered Controllers and blocks until the Stop channel is closed.
	// Returns an error if there is an error starting any controller.
	// If LeaderElection is used, the binary must be exited immediately after this returns,
	// otherwise components that need leader election might continue to run after the leader
	// lock was lost.
	Start(<-chan struct{}) error

	// GetWebhookServer returns a webhook.Server
	GetWebhookServer() *webhook.Server
}
```

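Because `Manager` embeds `Cluster`, existing single-cluster code keeps working unchanged. A minimal sketch (`cfg` and `someRunnable` are placeholders):

```go
// Single-cluster usage is unaffected: cluster-specific getters such as
// GetClient or GetScheme are still available directly on the Manager,
// now served by the embedded Cluster.
mgr, err := manager.New(cfg, manager.Options{})
if err != nil {
	panic(err)
}

// The lifecycle API is unchanged as well.
if err := mgr.Add(someRunnable); err != nil {
	panic(err)
}

if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
	panic(err)
}
```
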
Furthermore, during startup, the `Manager` will use a type assertion to find `Cluster`s
so that it can start their caches before anything else:

```go
type HasCaches interface {
	GetCache() cache.Cache
}

if getter, hasCaches := runnable.(HasCaches); hasCaches {
	m.caches = append(m.caches, getter.GetCache())
}
```

```go
for idx := range cm.caches {
	go func(idx int) { cm.caches[idx].Start(cm.internalStop) }(idx)
}

for _, cache := range cm.caches {
	cache.WaitForCacheSync(cm.internalStop)
}

// Start all other runnables
```

## Example

Below is a sample `reconciler` that will create a secret in a `mirrorCluster` for each
secret found in a `referenceCluster`, if no secret of that name exists in the mirror
cluster yet. To keep the sample short, it won't compare the contents of the secrets.

```go
type secretMirrorReconciler struct {
	referenceClusterClient, mirrorClusterClient client.Client
}

func (r *secretMirrorReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
	s := &corev1.Secret{}
	if err := r.referenceClusterClient.Get(context.TODO(), req.NamespacedName, s); err != nil {
		if kerrors.IsNotFound(err) {
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}

	if err := r.mirrorClusterClient.Get(context.TODO(), req.NamespacedName, &corev1.Secret{}); err != nil {
		if !kerrors.IsNotFound(err) {
			return reconcile.Result{}, err
		}

		// The secret does not exist in the mirror cluster yet, so create it there.
		mirrorSecret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Namespace: s.Namespace, Name: s.Name},
			Data:       s.Data,
		}
		return reconcile.Result{}, r.mirrorClusterClient.Create(context.TODO(), mirrorSecret)
	}

	return reconcile.Result{}, nil
}

func NewSecretMirrorReconciler(mgr manager.Manager, mirrorCluster cluster.Cluster) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch Secrets in the reference cluster
		For(&corev1.Secret{}).
		// Watch Secrets in the mirror cluster
		Watches(
			source.NewKindWithCache(&corev1.Secret{}, mirrorCluster.GetCache()),
			&handler.EnqueueRequestForObject{},
		).
		Complete(&secretMirrorReconciler{
			referenceClusterClient: mgr.GetClient(),
			mirrorClusterClient:    mirrorCluster.GetClient(),
		})
}

func main() {
	mgr, err := manager.New(cfg1, manager.Options{})
	if err != nil {
		panic(err)
	}

	mirrorCluster, err := cluster.New(cfg2)
	if err != nil {
		panic(err)
	}

	if err := mgr.Add(mirrorCluster); err != nil {
		panic(err)
	}

	if err := NewSecretMirrorReconciler(mgr, mirrorCluster); err != nil {
		panic(err)
	}

	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```
