3 changes: 1 addition & 2 deletions aws/resources/asg.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
@@ -72,7 +71,7 @@ func (ag *ASGroups) nukeAll(groupNames []*string) error {
 	waiter := autoscaling.NewGroupNotExistsWaiter(ag.Client)
 	err := waiter.Wait(ag.Context, &autoscaling.DescribeAutoScalingGroupsInput{
 		AutoScalingGroupNames: deletedGroupNames,
-	}, 5*time.Minute)
+	}, ag.Timeout)

 	if err != nil {
 		logging.Errorf("[Failed] %s", err)
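The change above is the template for every file in this PR: the final argument to an AWS SDK v2 waiter's `Wait` method is its maximum wait duration, and the hardcoded literal is replaced with the resource's configured `Timeout`. A minimal, self-contained sketch of the same call shape (the standalone client setup and the group name are illustrative, not taken from the PR):

```go
package main

import (
	"context"
	"log"
	"time"

	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

func main() {
	cfg, err := awsconfig.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := autoscaling.NewFromConfig(cfg)

	// The last argument is the waiter's overall deadline; this PR threads
	// the per-resource Timeout here instead of a literal like 5*time.Minute.
	timeout := 5 * time.Minute // stand-in for ag.Timeout
	waiter := autoscaling.NewGroupNotExistsWaiter(client)
	if err := waiter.Wait(context.Background(), &autoscaling.DescribeAutoScalingGroupsInput{
		AutoScalingGroupNames: []string{"example-asg"}, // hypothetical group name
	}, timeout); err != nil {
		log.Printf("waiter gave up after %v: %v", timeout, err)
	}
}
```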
18 changes: 15 additions & 3 deletions aws/resources/base_resource.go
@@ -16,6 +16,9 @@ const maxStopRetries = 3
 const waitDuration = 5 * time.Second
 const stopWaitDuration = 5 * time.Second

+// DefaultWaitTimeout is the default timeout for AWS resource deletion waiters
+const DefaultWaitTimeout = 5 * time.Minute
+
 // BaseAwsResource struct and its associated methods to serve as a placeholder or template for a resource that is not
 // yet fully implemented within a system or framework. Its purpose is to provide a skeleton structure that adheres to a
 // specific interface or contract expected by the system without containing the actual implementation details.
@@ -24,7 +27,6 @@ type BaseAwsResource struct {
 	Nukables map[string]error
 	Timeout  time.Duration
 	Context  context.Context
-	cancel   context.CancelFunc
 }

 func (br *BaseAwsResource) Init(cfg aws.Config) {
@@ -62,8 +64,10 @@ func (br *BaseAwsResource) GetAndSetResourceConfig(_ config.Config) config.Resou
 }

 func (br *BaseAwsResource) PrepareContext(parentContext context.Context, resourceConfig config.ResourceType) error {
+	br.Timeout = DefaultWaitTimeout
+	br.Context = parentContext
+
 	if resourceConfig.Timeout == "" {
-		br.Context = parentContext
 		return nil
 	}

@@ -72,7 +76,14 @@ func (br *BaseAwsResource) PrepareContext(parentContext context.Context, resourceConfig config.ResourceType) error {
 		return err
 	}

-	br.Context, _ = context.WithTimeout(parentContext, duration)
+	br.Timeout = duration
+	ctx, cancel := context.WithTimeout(parentContext, duration)
+	br.Context = ctx
+	// Note: cancel is intentionally not stored or deferred here.
+	// The context will be cancelled when the parent context is cancelled,
+	// or when the timeout expires. The parent context is managed by the
+	// caller and will be cancelled when the nuke operation completes.
+	_ = cancel
 	return nil
 }

@@ -107,3 +118,4 @@ func (br *BaseAwsResource) IsNukable(identifier string) (bool, error) {

 	return true, nil
 }
+
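With this change every resource gets a usable `Timeout` even when no per-resource timeout is configured: the default is applied first, then overridden if the config supplies a value. A runnable sketch of that control flow, with `config.ResourceType` stubbed to a plain struct and the timeout string parsed via `time.ParseDuration` (an assumption; the actual parsing in this file sits outside the diff):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const DefaultWaitTimeout = 5 * time.Minute

// ResourceType is a stand-in for config.ResourceType in this sketch.
type ResourceType struct{ Timeout string }

type BaseAwsResource struct {
	Timeout time.Duration
	Context context.Context
}

func (br *BaseAwsResource) PrepareContext(parent context.Context, rc ResourceType) error {
	// Defaults first: every resource gets a non-zero Timeout and a context.
	br.Timeout = DefaultWaitTimeout
	br.Context = parent
	if rc.Timeout == "" {
		return nil
	}
	// Override from config; also bound the context by the same duration.
	d, err := time.ParseDuration(rc.Timeout)
	if err != nil {
		return err
	}
	br.Timeout = d
	ctx, cancel := context.WithTimeout(parent, d)
	br.Context = ctx
	_ = cancel // released when the parent is cancelled or the deadline fires
	return nil
}

func main() {
	br := &BaseAwsResource{}
	_ = br.PrepareContext(context.Background(), ResourceType{})
	fmt.Println(br.Timeout) // 5m0s (default)
	_ = br.PrepareContext(context.Background(), ResourceType{Timeout: "90s"})
	fmt.Println(br.Timeout) // 1m30s (configured override)
}
```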
3 changes: 1 addition & 2 deletions aws/resources/ebs.go
@@ -3,7 +3,6 @@ package resources
 import (
 	"context"
 	goerr "errors"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/ec2"
@@ -118,7 +117,7 @@ func (ev *EBSVolumes) nukeAll(volumeIds []*string) error {
 	waiter := ec2.NewVolumeDeletedWaiter(ev.Client)
 	err := waiter.Wait(ev.Context, &ec2.DescribeVolumesInput{
 		VolumeIds: aws.ToStringSlice(deletedVolumeIDs),
-	}, 5*time.Minute)
+	}, ev.Timeout)
 	if err != nil {
 		logging.Debugf("[Failed] %s", err)
 		return errors.WithStackTrace(err)
3 changes: 1 addition & 2 deletions aws/resources/ec2.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/ec2"
@@ -161,7 +160,7 @@ func (ei *EC2Instances) nukeAll(instanceIds []*string) error {
 				Values: aws.ToStringSlice(instanceIds),
 			},
 		},
-	}, 15*time.Minute)
+	}, ei.Timeout)

 	if err != nil {
 		logging.Debugf("[Failed] %s", err)
2 changes: 1 addition & 1 deletion aws/resources/ec2_network_interface.go
@@ -212,7 +212,7 @@ func (ni *NetworkInterface) nukeInstance(id *string) error {
 	waiter := ec2.NewInstanceTerminatedWaiter(ni.Client)
 	err = waiter.Wait(ni.Context, &ec2.DescribeInstancesInput{
 		InstanceIds: []string{instanceID},
-	}, 5*time.Minute)
+	}, ni.Timeout)
 	if err != nil {
 		logging.Debugf("[nukeInstance] Instance termination waiting failed for instance %s: %v", instanceID, err)
 		return errors.WithStackTrace(err)
12 changes: 6 additions & 6 deletions aws/resources/ec2_vpc.go
@@ -88,7 +88,7 @@ func (v *EC2VPCs) nukeAll(vpcIds []string) error {
 		}

 		var err error
-		err = nuke(v.Client, v.ELBClient, id)
+		err = nuke(v.Client, v.ELBClient, id, v.Timeout)

 		// Record status of this resource
 		e := report.Entry{
@@ -111,7 +111,7 @@ func (v *EC2VPCs) nukeAll(vpcIds []string) error {
 	return nil
 }

-func nuke(client EC2VPCAPI, elbClient ELBClientAPI, vpcID string) error {
+func nuke(client EC2VPCAPI, elbClient ELBClientAPI, vpcID string, waitTimeout time.Duration) error {
 	var err error
 	// Note: order is quite important, otherwise you will encounter dependency violation errors.

@@ -127,7 +127,7 @@ func nuke(client EC2VPCAPI, elbClient ELBClientAPI, vpcID string) error {
 		return err
 	}

-	err = nukeEc2Instances(client, vpcID)
+	err = nukeEc2Instances(client, vpcID, waitTimeout)
 	if err != nil {
 		logging.Debug(fmt.Sprintf("Error nuking instances for VPC %s: %s", vpcID, err.Error()))
 		return err
@@ -894,7 +894,7 @@ func nukeTargetGroups(client ELBClientAPI, vpcID string) error {
 	return nil
 }

-func nukeEc2Instances(client EC2VPCAPI, vpcID string) error {
+func nukeEc2Instances(client EC2VPCAPI, vpcID string, waitTimeout time.Duration) error {
 	logging.Debug(fmt.Sprintf("Describing instances for %s", vpcID))
 	output, err := client.DescribeInstances(context.Background(), &ec2.DescribeInstancesInput{
 		Filters: []types.Filter{
@@ -933,12 +933,12 @@ func nukeEc2Instances(client EC2VPCAPI, vpcID string) error {
 		return errors.WithStackTrace(err)
 	}

-	// weight for terminate the instances
+	// wait for the instances to terminate
 	logging.Debug(fmt.Sprintf("waiting for the instance to be terminated for %s", vpcID))
 	waiter := ec2.NewInstanceTerminatedWaiter(client)
 	err = waiter.Wait(context.Background(), &ec2.DescribeInstancesInput{
 		InstanceIds: terminateInstancesIds,
-	}, 5*time.Minute)
+	}, waitTimeout)
 	if err != nil {
 		logging.Debug(fmt.Sprintf("Failed to wait instance termination for %s", vpcID))
 		return errors.WithStackTrace(err)
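Unlike the method call sites in the other files, `nuke` and `nukeEc2Instances` are free functions with no receiver carrying a `Timeout` field, so the value is threaded through as an explicit parameter from the `EC2VPCs` struct. A compact sketch of that plumbing, with the AWS client parameters omitted and the names trimmed to the essentials:

```go
package main

import (
	"fmt"
	"time"
)

// Free helpers have no struct carrying a Timeout field, so the caller's
// timeout travels down the call chain as an ordinary argument.
func nukeEc2Instances(vpcID string, waitTimeout time.Duration) error {
	fmt.Printf("waiting up to %v for instances in %s to terminate\n", waitTimeout, vpcID)
	return nil
}

func nuke(vpcID string, waitTimeout time.Duration) error {
	return nukeEc2Instances(vpcID, waitTimeout)
}

func main() {
	vTimeout := 5 * time.Minute // stand-in for v.Timeout on EC2VPCs
	_ = nuke("vpc-0123456789abcdef0", vTimeout)
}
```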
5 changes: 2 additions & 3 deletions aws/resources/ecs_service.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/ecs"
@@ -160,7 +159,7 @@ func (services *ECSServices) waitUntilServicesDrained(ecsServiceArns []*string)
 		}

 		waiter := ecs.NewServicesStableWaiter(services.Client)
-		err := waiter.Wait(services.Context, params, 15*time.Minute)
+		err := waiter.Wait(services.Context, params, services.Timeout)
 		if err != nil {
 			logging.Debugf("[Failed] Failed waiting for service to be stable %s: %s", *ecsServiceArn, err)
 		} else {
@@ -202,7 +201,7 @@ func (services *ECSServices) waitUntilServicesDeleted(ecsServiceArns []*string)
 		}

 		waiter := ecs.NewServicesInactiveWaiter(services.Client)
-		err := waiter.Wait(services.Context, params, 15*time.Minute)
+		err := waiter.Wait(services.Context, params, services.Timeout)

 		// Record status of this resource
 		e := report.Entry{
7 changes: 3 additions & 4 deletions aws/resources/eks.go
@@ -3,7 +3,6 @@ package resources
 import (
 	"context"
 	"sync"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/eks"
@@ -82,7 +81,7 @@ func (clusters *EKSClusters) deleteAsync(wg *sync.WaitGroup, errChan chan error,
 			err := waiter.Wait(clusters.Context, &eks.DescribeNodegroupInput{
 				ClusterName:   aws.String(eksClusterName),
 				NodegroupName: nodeGroup,
-			}, 15*time.Minute)
+			}, clusters.Timeout)
 			if err != nil {
 				logging.Debugf("[Failed] Failed waiting for Node Group %s associated with cluster %s to be deleted: %s", aws.ToString(nodeGroup), eksClusterName, err)
 				allSubResourceErrs = multierror.Append(allSubResourceErrs, err)
@@ -185,7 +184,7 @@ func (clusters *EKSClusters) deleteEKSClusterFargateProfiles(eksClusterName stri
 		waitErr := waiter.Wait(clusters.Context, &eks.DescribeFargateProfileInput{
 			ClusterName:        aws.String(eksClusterName),
 			FargateProfileName: fargateProfile,
-		}, 15*time.Minute)
+		}, clusters.Timeout)
 		if waitErr != nil {
 			logging.Debugf("[Failed] Failed waiting for Fargate Profile %s associated with cluster %s to be deleted: %s", aws.ToString(fargateProfile), eksClusterName, waitErr)
 			allDeleteErrs = multierror.Append(allDeleteErrs, waitErr)
@@ -206,7 +205,7 @@ func (clusters *EKSClusters) waitUntilEksClustersDeleted(eksClusterNames []*stri
 		waiter := eks.NewClusterDeletedWaiter(clusters.Client)
 		err := waiter.Wait(clusters.Context, &eks.DescribeClusterInput{
 			Name: eksClusterName,
-		}, 15*time.Minute)
+		}, clusters.Timeout)

 		// Record status of this resource
 		e := report.Entry{
5 changes: 2 additions & 3 deletions aws/resources/elasticache.go
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"strings"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/elasticache"
@@ -126,7 +125,7 @@ func (cache *Elasticaches) nukeNonReplicationGroupElasticacheCluster(clusterId *
 		&elasticache.DescribeCacheClustersInput{
 			CacheClusterId: clusterId,
 		},
-		15*time.Minute,
+		cache.Timeout,
 	)
 }

@@ -145,7 +144,7 @@ func (cache *Elasticaches) nukeReplicationGroupMemberElasticacheCluster(clusterI
 	waitErr := waiter.Wait(
 		cache.Context,
 		&elasticache.DescribeReplicationGroupsInput{ReplicationGroupId: clusterId},
-		15*time.Minute,
+		cache.Timeout,
 	)

 	if waitErr != nil {
3 changes: 1 addition & 2 deletions aws/resources/elbv2.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
@@ -69,7 +68,7 @@ func (balancer *LoadBalancersV2) nukeAll(arns []*string) error {
 	waiter := elasticloadbalancingv2.NewLoadBalancersDeletedWaiter(balancer.Client)
 	err := waiter.Wait(balancer.Context, &elasticloadbalancingv2.DescribeLoadBalancersInput{
 		LoadBalancerArns: aws.ToStringSlice(deletedArns),
-	}, 15*time.Minute)
+	}, balancer.Timeout)
 	if err != nil {
 		logging.Debugf("[Failed] %s", err)
 		return errors.WithStackTrace(err)
4 changes: 1 addition & 3 deletions aws/resources/rds.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/rds"
@@ -83,11 +82,10 @@ func (di *DBInstances) nukeAll(names []*string) error {

 	if len(deletedNames) > 0 {
 		for _, name := range deletedNames {
-
 			waiter := rds.NewDBInstanceDeletedWaiter(di.Client)
 			err := waiter.Wait(di.Context, &rds.DescribeDBInstancesInput{
 				DBInstanceIdentifier: name,
-			}, 1*time.Minute)
+			}, di.Timeout)

 			// Record status of this resource
 			e := report.Entry{
6 changes: 3 additions & 3 deletions aws/resources/rds_cluster.go
@@ -17,9 +17,9 @@ import (
 )

 func (instance *DBClusters) waitUntilRdsClusterDeleted(input *rds.DescribeDBClustersInput) error {
-	const maxRetries = 90                  // 90 attempts
-	const retryInterval = 10 * time.Second // 10 seconds between attempts
-	// Total wait time: 90 * 10s = 900s = 15 minutes
+	waitTimeout := instance.Timeout
+	const retryInterval = 10 * time.Second
+	maxRetries := int(waitTimeout / retryInterval)

 	for i := 0; i < maxRetries; i++ {
 		_, err := instance.Client.DescribeDBClusters(instance.Context, input)
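This waiter is hand-rolled rather than SDK-generated, so the timeout is honoured by deriving the attempt count from it: attempts = timeout / poll interval, with integer division. A tiny runnable check of that arithmetic; note the behavioural shift it implies: under the 5-minute DefaultWaitTimeout this loop now makes 30 attempts, whereas the old hardcoded constants gave 90.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// attempts = timeout / poll interval, truncated toward zero.
	const retryInterval = 10 * time.Second
	for _, timeout := range []time.Duration{5 * time.Minute, 15 * time.Minute} {
		fmt.Printf("%v timeout -> %d attempts\n", timeout, int(timeout/retryInterval))
	}
	// Output:
	// 5m0s timeout -> 30 attempts  (new DefaultWaitTimeout)
	// 15m0s timeout -> 90 attempts (old hardcoded budget)
}
```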
3 changes: 1 addition & 2 deletions aws/resources/redshift.go
@@ -2,7 +2,6 @@ package resources

 import (
 	"context"
-	"time"

 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/redshift"
@@ -65,7 +64,7 @@ func (rc *RedshiftClusters) nukeAll(identifiers []*string) error {
 		waiter := redshift.NewClusterDeletedWaiter(rc.Client)
 		err := waiter.Wait(rc.Context, &redshift.DescribeClustersInput{
 			ClusterIdentifier: id,
-		}, 5*time.Minute)
+		}, rc.Timeout)

 		// Record status of this resource
 		e := report.Entry{