
Commit 70bc5b2

Merge remote-tracking branch 'upstream/master' into kube2iam-docs

2 parents: 803c6ed + 68ce8b9

File tree: 46 files changed (+916, -87 lines)


Gopkg.lock

Lines changed: 3 additions & 3 deletions
Some generated files are not rendered by default.

Gopkg.toml

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@ required = [

 [[constraint]]
   name = "github.com/hashicorp/terraform"
-  version = "0.11.5"
+  version = "0.11.7"

 [[constraint]]
   name = "github.com/terraform-providers/terraform-provider-aws"

docs/spelling_wordlist.txt

Lines changed: 4 additions & 0 deletions
@@ -78,3 +78,7 @@ plugin
 checklist
 localhost
 ReplicaSet
+overprovisioning
+autoscaling
+preempted
+millicores

docs/user-guide.rst

Lines changed: 69 additions & 1 deletion
@@ -176,6 +176,57 @@ The current implementation will configure the first instance pool of type worker
 in your cluster configuration to scale between `minCount` and `maxCount`. We
 plan to add support for an arbitrary number of worker instance pools.

+Overprovisioning
+++++++++++++++++
+
+Tarmak supports overprovisioning to give a fixed or proportional amount of
+headroom in the cluster. The technique used to implement overprovisioning is
+the same as described in the `cluster autoscaler documentation
+<https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#user-content-how-can-i-configure-overprovisioning-with-cluster-autoscaler>`_.
+The following `tarmak.yaml` snippet shows how to configure fixed
+overprovisioning. Note that cluster autoscaling must also be enabled.
+
+.. code-block:: yaml
+
+   kubernetes:
+     clusterAutoscaler:
+       enabled: true
+       overprovisioning:
+         enabled: true
+         reservedMillicoresPerReplica: 100
+         reservedMegabytesPerReplica: 100
+         replicaCount: 10
+   ...
+
+This will deploy 10 pause Pods with a negative `PriorityClass
+<https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/>`_
+so that they will be preempted by any other pending Pods. Each Pod will request
+the specified number of millicores and megabytes. The following `tarmak.yaml`
+snippet shows how to configure proportional overprovisioning.
+
+.. code-block:: yaml
+
+   kubernetes:
+     clusterAutoscaler:
+       enabled: true
+       overprovisioning:
+         enabled: true
+         reservedMillicoresPerReplica: 100
+         reservedMegabytesPerReplica: 100
+         nodesPerReplica: 1
+         coresPerReplica: 4
+   ...
+
+The `nodesPerReplica` and `coresPerReplica` configuration parameters are
+described in the `cluster-proportional-autoscaler documentation
+<https://github.com/kubernetes-incubator/cluster-proportional-autoscaler#user-content-linear-mode>`_.
+
+The image and version used by the cluster-proportional-autoscaler can also be
+specified using the `image` and `version` fields of the `overprovisioning`
+block. These values default to
+`k8s.gcr.io/cluster-proportional-autoscaler-amd64` and `1.1.2` respectively.
+
 Logging
 ~~~~~~~
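For proportional overprovisioning, the cluster-proportional-autoscaler's linear mode sizes the pause Deployment from cluster capacity. A minimal sketch of that calculation in Go, assuming the linear-mode formula from the linked documentation (the real controller reads node and core counts from the API server):

    package main

    import (
        "fmt"
        "math"
    )

    // overprovisioningReplicas mirrors the linear mode of the
    // cluster-proportional-autoscaler:
    //   replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica))
    func overprovisioningReplicas(cores, nodes, coresPerReplica, nodesPerReplica float64) int {
        byCores := math.Ceil(cores / coresPerReplica)
        byNodes := math.Ceil(nodes / nodesPerReplica)
        return int(math.Max(byCores, byNodes))
    }

    func main() {
        // With coresPerReplica: 4 and nodesPerReplica: 1, as in the snippet
        // above, a 3-node cluster with 6 cores in total gets 3 pause Pods.
        fmt.Println(overprovisioningReplicas(6, 3, 4, 1)) // 3
    }
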
@@ -413,6 +464,19 @@ configuration like that:
     enabled: true
     externalScrapeTargetsOnly: true

+
+API Server
+~~~~~~~~~~
+
+It is possible to let Tarmak create a public endpoint for your API server.
+This can be used together with `Secure public endpoints <user-guide.html#secure-api-server>`__.
+
+.. code-block:: yaml
+
+   kubernetes:
+     apiServer:
+       public: true
+
 Secure public endpoints
 ~~~~~~~~~~~~~~~~~~~~~~~

@@ -460,12 +524,16 @@ by adding ``allowCIDRs`` in the instance pool block:
       size: large
       type: jenkins

+.. _secure-api-server:

 API Server
 ++++++++++

 For API server you can overwrite the environment level by adding ``allowCIDRs``
-to the kubernetes block
+to the kubernetes block.
+
+.. warning::
+   For this to work, you need to set your `API Server public <user-guide.html#api-server>`__ first.

 .. code-block:: yaml

pkg/apis/cluster/v1alpha1/cluster.go

Lines changed: 15 additions & 3 deletions
@@ -84,9 +84,21 @@ type ClusterKubernetes struct {
 }

 type ClusterKubernetesClusterAutoscaler struct {
-    Enabled bool   `json:"enabled,omitempty"`
-    Image   string `json:"image,omitempty"`
-    Version string `json:"version,omitempty"`
+    Enabled          bool                                                `json:"enabled,omitempty"`
+    Image            string                                              `json:"image,omitempty"`
+    Version          string                                              `json:"version,omitempty"`
+    Overprovisioning *ClusterKubernetesClusterAutoscalerOverprovisioning `json:"overprovisioning,omitempty"`
+}
+
+type ClusterKubernetesClusterAutoscalerOverprovisioning struct {
+    Enabled                      bool   `json:"enabled,omitempty"`
+    Image                        string `json:"image,omitempty"`
+    Version                      string `json:"version,omitempty"`
+    ReservedMillicoresPerReplica int    `json:"reservedMillicoresPerReplica,omitempty"`
+    ReservedMegabytesPerReplica  int    `json:"reservedMegabytesPerReplica,omitempty"`
+    CoresPerReplica              int    `json:"coresPerReplica,omitempty"`
+    NodesPerReplica              int    `json:"nodesPerReplica,omitempty"`
+    ReplicaCount                 int    `json:"replicaCount,omitempty"`
 }

 type ClusterKubernetesTiller struct {
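The lowerCamelCase keys in `tarmak.yaml` map onto the new struct through its JSON tags. A standalone sketch of that mapping using encoding/json with a trimmed copy of the struct (illustrative only, not Tarmak's actual config loader):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // overprovisioning is a trimmed copy of
    // ClusterKubernetesClusterAutoscalerOverprovisioning, for illustration.
    type overprovisioning struct {
        Enabled                      bool `json:"enabled,omitempty"`
        ReservedMillicoresPerReplica int  `json:"reservedMillicoresPerReplica,omitempty"`
        ReplicaCount                 int  `json:"replicaCount,omitempty"`
    }

    func main() {
        raw := []byte(`{"enabled": true, "reservedMillicoresPerReplica": 100, "replicaCount": 10}`)
        var o overprovisioning
        if err := json.Unmarshal(raw, &o); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", o) // {Enabled:true ReservedMillicoresPerReplica:100 ReplicaCount:10}
    }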

pkg/puppet/puppet.go

Lines changed: 29 additions & 0 deletions
@@ -170,6 +170,29 @@ func kubernetesClusterConfigPerRole(conf *clusterv1alpha1.ClusterKubernetes, rol
         if conf.ClusterAutoscaler.Version != "" {
             hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::version: "%s"`, conf.ClusterAutoscaler.Version))
         }
+        if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled {
+            hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true`)
+            hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`)
+
+            if conf.ClusterAutoscaler.Overprovisioning.Image != "" {
+                hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_image: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Image))
+            }
+            if conf.ClusterAutoscaler.Overprovisioning.Version != "" {
+                hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_version: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Version))
+            }
+
+            hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica))
+            hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica))
+            hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::cores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.CoresPerReplica))
+            hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::nodes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.NodesPerReplica))
+            hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::replica_count: %d`, conf.ClusterAutoscaler.Overprovisioning.ReplicaCount))
+        }
+    }
+
+    if roleName == clusterv1alpha1.KubernetesWorkerRoleName && conf.ClusterAutoscaler != nil && conf.ClusterAutoscaler.Enabled {
+        if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled {
+            hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`)
+        }
     }

     if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.Tiller != nil && conf.Tiller.Enabled {
@@ -332,6 +355,12 @@ func (p *Puppet) writeHieraData(puppetPath string, cluster interfaces.Cluster) e
         variables = append(variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::instance_pool_name: "%s"`, workerInstancePoolName))
     }

+    // etcd
+    if instancePool.Role().Name() == clusterv1alpha1.KubernetesEtcdRoleName {
+        variables = append(variables, fmt.Sprintf(`tarmak::etcd_instances: %d`, instancePool.MinCount()))
+        variables = append(variables, `tarmak::etcd_mount_unit: "var-lib-etcd.mount"`)
+    }
+
     // classes
     err = p.writeLines(
         filepath.Join(hieraPath, "instance_pools", fmt.Sprintf("%s_classes.yaml", instancePool.Name())), classes,
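For the fixed-overprovisioning `tarmak.yaml` example in the user guide above (100 millicores, 100 megabytes, 10 replicas), this code path would emit hiera variables along the following lines. This is an illustration derived from the format strings above; the unset proportional parameters render as 0, and image/version lines are skipped when unset:

    kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true
    kubernetes::enable_pod_priority: true
    kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: 100
    kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: 100
    kubernetes_addons::cluster_autoscaler::cores_per_replica: 0
    kubernetes_addons::cluster_autoscaler::nodes_per_replica: 0
    kubernetes_addons::cluster_autoscaler::replica_count: 10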

pkg/tarmak/cluster/cluster.go

Lines changed: 36 additions & 0 deletions
@@ -194,6 +194,11 @@ func (c *Cluster) Validate() (result error) {
         result = multierror.Append(result, err)
     }

+    // validate overprovisioning
+    if err := c.validateClusterAutoscaler(); err != nil {
+        result = multierror.Append(result, fmt.Errorf("invalid overprovisioning configuration: %s", err))
+    }
+
     //validate apiserver
     if k := c.Config().Kubernetes; k != nil {
         if apiServer := k.APIServer; apiServer != nil {
@@ -249,6 +254,37 @@ func (c *Cluster) validateLoggingSinks() (result error) {
     return nil
 }

+// validate overprovisioning
+func (c *Cluster) validateClusterAutoscaler() (result error) {
+
+    if c.Config().Kubernetes != nil && c.Config().Kubernetes.ClusterAutoscaler != nil && c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning != nil {
+        if !c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.Enabled {
+            return nil
+        }
+        if c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.Enabled && !c.Config().Kubernetes.ClusterAutoscaler.Enabled {
+            return fmt.Errorf("cannot enable overprovisioning if cluster autoscaling is disabled")
+        }
+        if c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica < 0 ||
+            c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica < 0 ||
+            c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.CoresPerReplica < 0 ||
+            c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.NodesPerReplica < 0 ||
+            c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReplicaCount < 0 {
+            return fmt.Errorf("cannot set negative overprovisioning parameters")
+        }
+        if c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica == 0 && c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica == 0 {
+            return fmt.Errorf("one of reservedMillicoresPerReplica and reservedMegabytesPerReplica must be set")
+        }
+        if (c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.CoresPerReplica > 0 || c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.NodesPerReplica > 0) && c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.ReplicaCount > 0 {
+            return fmt.Errorf("cannot configure both static and per replica overprovisioning rules")
+        }
+        if (c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.Image != "" || c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.Version != "") && (c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.CoresPerReplica == 0 && c.Config().Kubernetes.ClusterAutoscaler.Overprovisioning.NodesPerReplica == 0) {
+            return fmt.Errorf("setting overprovisioning image or version is only valid when proportional overprovisioning is enabled")
+        }
+    }
+
+    return nil
+}
+
 // Validate APIServer
 func (c *Cluster) validateAPIServer() (result error) {
     for _, cidr := range c.Config().Kubernetes.APIServer.AllowCIDRs {
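Condensed, these are the rules the new validator enforces, restated over a plain struct for readability (a sketch, not the method as merged; `overprovisioning` and `checkOverprovisioning` are local illustrative names):

    package main

    import "fmt"

    // overprovisioning is a local stand-in for the
    // ClusterKubernetesClusterAutoscalerOverprovisioning config block.
    type overprovisioning struct {
        Enabled                      bool
        Image                        string
        Version                      string
        ReservedMillicoresPerReplica int
        ReservedMegabytesPerReplica  int
        CoresPerReplica              int
        NodesPerReplica              int
        ReplicaCount                 int
    }

    // checkOverprovisioning restates validateClusterAutoscaler's checks.
    func checkOverprovisioning(autoscalerEnabled bool, o overprovisioning) error {
        if !o.Enabled {
            return nil // nothing to validate
        }
        if !autoscalerEnabled {
            return fmt.Errorf("cannot enable overprovisioning if cluster autoscaling is disabled")
        }
        if o.ReservedMillicoresPerReplica < 0 || o.ReservedMegabytesPerReplica < 0 ||
            o.CoresPerReplica < 0 || o.NodesPerReplica < 0 || o.ReplicaCount < 0 {
            return fmt.Errorf("cannot set negative overprovisioning parameters")
        }
        if o.ReservedMillicoresPerReplica == 0 && o.ReservedMegabytesPerReplica == 0 {
            return fmt.Errorf("one of reservedMillicoresPerReplica and reservedMegabytesPerReplica must be set")
        }
        proportional := o.CoresPerReplica > 0 || o.NodesPerReplica > 0
        if proportional && o.ReplicaCount > 0 {
            return fmt.Errorf("cannot configure both static and per replica overprovisioning rules")
        }
        if (o.Image != "" || o.Version != "") && !proportional {
            return fmt.Errorf("setting overprovisioning image or version is only valid when proportional overprovisioning is enabled")
        }
        return nil
    }

    func main() {
        // Mixing static (replicaCount) and proportional (coresPerReplica)
        // rules fails validation.
        fmt.Println(checkOverprovisioning(true, overprovisioning{
            Enabled:                      true,
            ReservedMillicoresPerReplica: 100,
            ReplicaCount:                 10,
            CoresPerReplica:              4,
        }))
    }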

pkg/tarmak/cluster/cluster_test.go

Lines changed: 56 additions & 0 deletions
@@ -163,6 +163,62 @@ func TestCluster_NewMinimalHub(t *testing.T) {
     }
 }

+func TestValidateClusterAutoscaler(t *testing.T) {
+    clusterConfig := config.NewClusterSingle("single", "cluster")
+    config.ApplyDefaults(clusterConfig)
+    clusterConfig.Kubernetes.ClusterAutoscaler.Enabled = true
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning = &clusterv1alpha1.ClusterKubernetesClusterAutoscalerOverprovisioning{}
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.Enabled = false
+
+    cluster := &Cluster{
+        conf: clusterConfig,
+    }
+
+    // overprovisioning disabled without required settings
+    if err := cluster.validateClusterAutoscaler(); err != nil {
+        t.Errorf("validation should pass when cluster autoscaler is enabled and overprovisioning is disabled without required settings: %s", err)
+    }
+
+    // reservations not set
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.Enabled = true
+    if cluster.validateClusterAutoscaler() == nil {
+        t.Errorf("validation should fail when no reservations are set")
+    }
+
+    // autoscaler and overprovisioning enabled
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica = 1
+    if err := cluster.validateClusterAutoscaler(); err != nil {
+        t.Errorf("validation should pass when cluster autoscaler and overprovisioning are enabled: %s", err)
+    }
+
+    // autoscaler disabled with overprovisioning enabled
+    clusterConfig.Kubernetes.ClusterAutoscaler.Enabled = false
+    if cluster.validateClusterAutoscaler() == nil {
+        t.Errorf("validation should fail when cluster autoscaler is disabled and overprovisioning is enabled")
+    }
+    clusterConfig.Kubernetes.ClusterAutoscaler.Enabled = true
+
+    // negative reserved millicores
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica = -1
+    if cluster.validateClusterAutoscaler() == nil {
+        t.Errorf("validation should fail when reserving negative millicores")
+    }
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica = 1
+
+    // static overprovisioning with proportional autoscaler
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.Image = "image"
+    if cluster.validateClusterAutoscaler() == nil {
+        t.Errorf("validation should fail when configuring static overprovisioning and proportional autoscaler")
+    }
+
+    // static and proportional overprovisioning
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.CoresPerReplica = 1
+    clusterConfig.Kubernetes.ClusterAutoscaler.Overprovisioning.ReplicaCount = 1
+    if cluster.validateClusterAutoscaler() == nil {
+        t.Errorf("validation should fail when configuring static and proportional overprovisioning")
+    }
+}
+
 /*
 func testDefaultClusterConfig() *config.Cluster {
     return &config.Cluster{

pkg/tarmak/cluster/firewall/rules.go

Lines changed: 29 additions & 18 deletions
@@ -33,21 +33,22 @@ type Rule struct {
 }

 var (
-    zeroPort          = uint16(0)
-    sshPort           = uint16(22)
-    bgpPort           = uint16(179)
-    overlayPort       = uint16(2359)
-    k8sEventsPort     = uint16(2369)
-    k8sPort           = uint16(2379)
-    apiPort           = uint16(6443)
-    consulRCPPort     = uint16(8300)
-    consulSerfPort    = uint16(8301)
-    vaultPort         = uint16(8200)
-    calicoMetricsPort = uint16(9091)
-    nodePort          = uint16(9100)
-    blackboxPort      = uint16(9115)
-    wingPort          = uint16(9443)
-    maxPort           = uint16(65535)
+    zeroPort                     = uint16(0)
+    sshPort                      = uint16(22)
+    bgpPort                      = uint16(179)
+    overlayPort                  = uint16(2359)
+    k8sEventsPort                = uint16(2369)
+    k8sPort                      = uint16(2379)
+    apiPort                      = uint16(6443)
+    consulRCPPort                = uint16(8300)
+    consulSerfPort               = uint16(8301)
+    vaultPort                    = uint16(8200)
+    clusterAutoscalerMetricsPort = uint16(8085)
+    calicoMetricsPort            = uint16(9091)
+    nodePort                     = uint16(9100)
+    blackboxPort                 = uint16(9115)
+    wingPort                     = uint16(9443)
+    maxPort                      = uint16(65535)

     k8sIdentifier       = "k8s"
     k8sEventsIdentifier = "k8sevents"
@@ -96,14 +97,24 @@ func newSSHService() Service {

 func newCalicoMetricsService() Service {
     return Service{
-        Name:     "metrics",
+        Name:     "calico",
         Protocol: "tcp",
         Ports: []Port{
            Port{Single: &calicoMetricsPort},
        },
     }
 }

+func newClusterAutoscalerMetricsService() Service {
+    return Service{
+        Name:     "cluster_autoscaler",
+        Protocol: "tcp",
+        Ports: []Port{
+            Port{Single: &clusterAutoscalerMetricsPort},
+        },
+    }
+}
+
 func newIPIPService() Service {
     return Service{
         Name:     "ipip",
@@ -302,8 +313,8 @@ func Rules() (rules []*Rule) {

         //// Master
         &Rule{
-            Comment:   "allow workers/master to connect to calico's service + api server",
-            Services:  []Service{newBGPService(), newIPIPService(), newCalicoMetricsService(), newAPIService()},
+            Comment:   "allow workers/master to connect to calico's service, cluster autoscaler's service + api server",
+            Services:  []Service{newBGPService(), newIPIPService(), newCalicoMetricsService(), newClusterAutoscalerMetricsService(), newAPIService()},
             Direction: "ingress",
             Sources: []Host{
                 Host{Role: "master"},
