Skip to content

Commit 2addd3a

Browse files
authored
[EKSCTL create cluster command] Authorise self-managed nodes via aws-auth configmap when EKS access entries are disabled (#7698)
* Disable access entry creation for self-managed nodes on clusters with CONFIG_MAP only * Fix logic for updating aws-auth configmap
1 parent f9475f8 commit 2addd3a

File tree

7 files changed

+67
-34
lines changed

7 files changed

+67
-34
lines changed

integration/tests/accessentries/accessentries_test.go

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ var (
5050
namespaceRoleARN string
5151
err error
5252

53-
apiEnabledCluster = "accessentries-api-enabled-2"
54-
apiDisabledCluster = "accessentries-api-disabled-2"
53+
apiEnabledCluster = "accessentries-api-enabled"
54+
apiDisabledCluster = "accessentries-api-disabled"
5555
)
5656

5757
func init() {
@@ -123,24 +123,39 @@ var _ = Describe("(Integration) [AccessEntries Test]", func() {
123123
cfg = makeClusterConfig(apiDisabledCluster)
124124
})
125125

126-
It("should create a cluster with authenticationMode set to CONFIG_MAP", func() {
126+
It("should create a cluster with authenticationMode set to CONFIG_MAP and allow self-managed nodes to join via aws-auth", func() {
127127
cfg.AccessConfig.AuthenticationMode = ekstypes.AuthenticationModeConfigMap
128-
128+
cfg.NodeGroups = append(cfg.NodeGroups, &api.NodeGroup{
129+
NodeGroupBase: &api.NodeGroupBase{
130+
Name: "aws-auth-ng",
131+
ScalingConfig: &api.ScalingConfig{
132+
DesiredCapacity: aws.Int(1),
133+
},
134+
},
135+
})
129136
data, err := json.Marshal(cfg)
130137
Expect(err).NotTo(HaveOccurred())
131138

132139
Expect(params.EksctlCreateCmd.
133140
WithArgs(
134141
"cluster",
135142
"--config-file", "-",
136-
"--without-nodegroup",
137143
"--verbose", "4",
138144
).
139145
WithoutArg("--region", params.Region).
140146
WithStdin(bytes.NewReader(data))).To(RunSuccessfully())
141147

142148
Expect(ctl.RefreshClusterStatus(context.Background(), cfg)).NotTo(HaveOccurred())
143149
Expect(ctl.IsAccessEntryEnabled()).To(BeFalse())
150+
151+
Expect(params.EksctlGetCmd.WithArgs(
152+
"nodegroup",
153+
"--cluster", apiDisabledCluster,
154+
"--name", "aws-auth-ng",
155+
"-o", "yaml",
156+
)).To(runner.RunSuccessfullyWithOutputStringLines(
157+
ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
158+
))
144159
})
145160

146161
It("should fail early when trying to create access entries", func() {
@@ -400,6 +415,7 @@ var _ = SynchronizedAfterSuite(func() {}, func() {
400415
WithArgs(
401416
"cluster",
402417
"--name", apiDisabledCluster,
418+
"--disable-nodegroup-eviction",
403419
"--wait",
404420
)).To(RunSuccessfully())
405421

pkg/actions/nodegroup/create.go

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -285,11 +285,17 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
285285
timeoutCtx, cancel := context.WithTimeout(ctx, m.ctl.AWSProvider.WaitTimeout())
286286
defer cancel()
287287

288-
if (!m.accessEntry.IsEnabled() && !api.IsDisabled(options.UpdateAuthConfigMap)) || api.IsEnabled(options.UpdateAuthConfigMap) {
288+
// authorize self-managed nodes to join the cluster via aws-auth configmap
289+
// if EKS access entries are disabled OR
290+
if (!m.accessEntry.IsEnabled() && !api.IsDisabled(options.UpdateAuthConfigMap)) ||
291+
// if explicitly requested by the user
292+
api.IsEnabled(options.UpdateAuthConfigMap) {
289293
if err := eks.UpdateAuthConfigMap(m.cfg.NodeGroups, clientSet); err != nil {
290294
return err
291295
}
292296
}
297+
298+
// only wait for self-managed nodes to join if either authorization method is being used
293299
if !api.IsDisabled(options.UpdateAuthConfigMap) {
294300
for _, ng := range m.cfg.NodeGroups {
295301
if err := eks.WaitForNodes(timeoutCtx, clientSet, ng); err != nil {
@@ -298,6 +304,7 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
298304
}
299305
}
300306
logger.Success("created %d nodegroup(s) in cluster %q", len(m.cfg.NodeGroups), m.cfg.Metadata.Name)
307+
301308
for _, ng := range m.cfg.ManagedNodeGroups {
302309
if err := eks.WaitForNodes(timeoutCtx, clientSet, ng); err != nil {
303310
if m.cfg.PrivateCluster.Enabled {
@@ -308,8 +315,8 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
308315
}
309316
}
310317
}
311-
312318
logger.Success("created %d managed nodegroup(s) in cluster %q", len(m.cfg.ManagedNodeGroups), m.cfg.Metadata.Name)
319+
313320
return nil
314321
}
315322

pkg/cfn/manager/create_tasks.go

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ import (
88
"github.com/pkg/errors"
99
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1010

11+
ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
12+
1113
"github.com/weaveworks/eksctl/pkg/actions/accessentry"
1214
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
1315
iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
@@ -24,7 +26,7 @@ const (
2426
// NewTasksToCreateCluster defines all tasks required to create a cluster along
2527
// with some nodegroups; see CreateAllNodeGroups for how onlyNodeGroupSubset works.
2628
func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup,
27-
managedNodeGroups []*api.ManagedNodeGroup, accessEntries []api.AccessEntry, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree {
29+
managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree {
2830
taskTree := tasks.TaskTree{Parallel: false}
2931

3032
taskTree.Append(&createClusterTask{
@@ -34,8 +36,8 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
3436
ctx: ctx,
3537
})
3638

37-
if len(accessEntries) > 0 {
38-
taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessEntries))
39+
if len(accessConfig.AccessEntries) > 0 {
40+
taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessConfig.AccessEntries))
3941
}
4042

4143
appendNodeGroupTasksTo := func(taskTree *tasks.TaskTree) {
@@ -44,7 +46,8 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
4446
Parallel: true,
4547
IsSubTask: true,
4648
}
47-
if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, false, vpcImporter); unmanagedNodeGroupTasks.Len() > 0 {
49+
disableAccessEntryCreation := accessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap
50+
if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, disableAccessEntryCreation, vpcImporter); unmanagedNodeGroupTasks.Len() > 0 {
4851
unmanagedNodeGroupTasks.IsSubTask = true
4952
nodeGroupTasks.Append(unmanagedNodeGroupTasks)
5053
}

pkg/cfn/manager/fakes/fake_stack_manager.go

Lines changed: 8 additions & 13 deletions
Some generated files are not rendered by default. (See GitHub's documentation on customizing how changed files appear.)

pkg/cfn/manager/interface.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ type StackManager interface {
8989
NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, newTasksToDeleteAddonIAM NewTasksToDeleteAddonIAM, newTasksToDeletePodIdentityRole NewTasksToDeletePodIdentityRole, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
9090
NewTasksToCreateIAMServiceAccounts(serviceAccounts []*api.ClusterIAMServiceAccount, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter) *tasks.TaskTree
9191
NewTaskToDeleteUnownedNodeGroup(ctx context.Context, clusterName, nodegroup string, nodeGroupDeleter NodeGroupDeleter, waitCondition *DeleteWaitCondition) tasks.Task
92-
NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessEntries []api.AccessEntry, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
92+
NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
9393
NewTasksToDeleteIAMServiceAccounts(ctx context.Context, serviceAccounts []string, clientSetGetter kubernetes.ClientSetGetter, wait bool) (*tasks.TaskTree, error)
9494
NewTasksToDeleteNodeGroups(stacks []NodeGroupStack, shouldDelete func(_ string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
9595
NewTasksToDeleteOIDCProviderWithIAMServiceAccounts(ctx context.Context, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, force bool) (*tasks.TaskTree, error)

pkg/cfn/manager/tasks_test.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,7 @@ var _ = Describe("StackCollection Tasks", func() {
7575

7676
It("should have nice description", func() {
7777
fakeVPCImporter := new(vpcfakes.FakeImporter)
78+
accessConfig := &api.AccessConfig{}
7879
// TODO use DescribeTable
7980

8081
// The supportsManagedNodes argument has no effect on the Describe call, so the values are alternated
@@ -99,7 +100,7 @@ var _ = Describe("StackCollection Tasks", func() {
99100
Expect(tasks.Describe()).To(Equal(`no tasks`))
100101
}
101102
{
102-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, nil, nil)
103+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, accessConfig, nil)
103104
Expect(tasks.Describe()).To(Equal(`
104105
2 sequential tasks: { create cluster control plane "test-cluster",
105106
2 parallel sub-tasks: {
@@ -110,18 +111,18 @@ var _ = Describe("StackCollection Tasks", func() {
110111
`))
111112
}
112113
{
113-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, nil, nil)
114+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil)
114115
Expect(tasks.Describe()).To(Equal(`
115116
2 sequential tasks: { create cluster control plane "test-cluster", create nodegroup "bar"
116117
}
117118
`))
118119
}
119120
{
120-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, nil, nil)
121+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, accessConfig, nil)
121122
Expect(tasks.Describe()).To(Equal(`1 task: { create cluster control plane "test-cluster" }`))
122123
}
123124
{
124-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), nil, nil)
125+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), accessConfig, nil)
125126
Expect(tasks.Describe()).To(Equal(`
126127
2 sequential tasks: { create cluster control plane "test-cluster",
127128
2 parallel sub-tasks: {
@@ -138,7 +139,7 @@ var _ = Describe("StackCollection Tasks", func() {
138139
`))
139140
}
140141
{
141-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), nil, nil)
142+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), accessConfig, nil)
142143
Expect(tasks.Describe()).To(Equal(`
143144
2 sequential tasks: { create cluster control plane "test-cluster",
144145
2 parallel sub-tasks: {
@@ -161,7 +162,7 @@ var _ = Describe("StackCollection Tasks", func() {
161162
`))
162163
}
163164
{
164-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), nil, nil)
165+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), accessConfig, nil)
165166
Expect(tasks.Describe()).To(Equal(`
166167
2 sequential tasks: { create cluster control plane "test-cluster",
167168
2 parallel sub-tasks: {
@@ -172,7 +173,7 @@ var _ = Describe("StackCollection Tasks", func() {
172173
`))
173174
}
174175
{
175-
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, nil, nil, &task{id: 1})
176+
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil, &task{id: 1})
176177
Expect(tasks.Describe()).To(Equal(`
177178
2 sequential tasks: { create cluster control plane "test-cluster",
178179
2 sequential sub-tasks: {

pkg/ctl/create/cluster.go

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"sync"
99

1010
"github.com/aws/aws-sdk-go-v2/aws"
11+
ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
1112

1213
"github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector"
1314
"github.com/kris-nova/logger"
@@ -360,7 +361,7 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
360361
postClusterCreationTasks.Append(preNodegroupAddons)
361362
}
362363

363-
taskTree := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig.AccessEntries, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), postClusterCreationTasks)
364+
taskTree := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), postClusterCreationTasks)
364365

365366
logger.Info(taskTree.Describe())
366367
if errs := taskTree.DoAllSync(); len(errs) > 0 {
@@ -426,18 +427,28 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
426427
} else {
427428
ngCtx, cancel := context.WithTimeout(ctx, cmd.ProviderConfig.WaitTimeout)
428429
defer cancel()
430+
431+
// authorize self-managed nodes to join the cluster via aws-auth configmap
432+
// only if EKS access entries are disabled
433+
if cfg.AccessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap {
434+
if err := eks.UpdateAuthConfigMap(cfg.NodeGroups, clientSet); err != nil {
435+
return err
436+
}
437+
}
438+
429439
for _, ng := range cfg.NodeGroups {
430-
// wait for nodes to join
431440
if err := eks.WaitForNodes(ngCtx, clientSet, ng); err != nil {
432441
return err
433442
}
434443
}
444+
logger.Success("created %d nodegroup(s) in cluster %q", len(cfg.NodeGroups), cfg.Metadata.Name)
435445

436446
for _, ng := range cfg.ManagedNodeGroups {
437447
if err := eks.WaitForNodes(ngCtx, clientSet, ng); err != nil {
438448
return err
439449
}
440450
}
451+
logger.Success("created %d managed nodegroup(s) in cluster %q", len(cfg.ManagedNodeGroups), cfg.Metadata.Name)
441452
}
442453
}
443454
if postNodegroupAddons != nil && postNodegroupAddons.Len() > 0 {

0 commit comments

Comments
 (0)