From 36a2a815b6cf9b4d1c2518cb491630e159394fb6 Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 22 Jul 2025 16:06:03 +0000 Subject: [PATCH] chore(schema): update --- samtranslator/schema/schema.json | 64 ++-- schema_source/cloudformation-docs.json | 467 +++++++++++++++++------ schema_source/cloudformation.schema.json | 64 ++-- 3 files changed, 421 insertions(+), 174 deletions(-) diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index b14753eca..37f02e3d2 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -18100,7 +18100,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "markdownDescription": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "title": "InstanceType", "type": "string" }, @@ -18346,7 +18346,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge", + "markdownDescription": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge", "title": "InstanceType", "type": "string" }, @@ -32449,7 +32449,7 @@ "type": "array" }, "KeyAlgorithm": { - "markdownDescription": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html#algorithms) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", + "markdownDescription": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA or ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. 
For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate-characteristics.html#algorithms-term) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", "title": "KeyAlgorithm", "type": "string" }, @@ -35416,7 +35416,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. `UNION` is not supported for create operations when using StackSet as a resource.", + "markdownDescription": "Refines which accounts to deploy stacks to by specifying how to use the `Accounts` and `OrganizationalUnitIds` properties together.\n\nThe following values determine how CloudFormation selects target accounts:\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` property.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` property.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` property. `UNION` is not supported for create operations when using StackSet as a resource or the `CreateStackInstances` API.", "title": "AccountFilterType", "type": "string" }, @@ -35429,7 +35429,7 @@ "type": "array" }, "AccountsUrl": { - "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. There is currently a 10MB limit for the data (approximately 800,000 accounts).", + "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. There is currently a 10MB limit for the data (approximately 800,000 accounts).\n\nThis property serves the same purpose as `Accounts` but allows you to specify a large number of accounts.", "title": "AccountsUrl", "type": "string" }, @@ -35448,7 +35448,7 @@ "additionalProperties": false, "properties": { "Active": { - "markdownDescription": "When `true` , StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order.\n\n> If there are already running or queued operations, StackSets queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your StackSet's execution configuration while there are running or queued operations for that StackSet. 
\n\nWhen `false` (default), StackSets performs one operation at a time in request order.", "title": "Active", "type": "boolean" } @@ -35519,7 +35519,7 @@ "properties": { "DeploymentTargets": { "$ref": "#/definitions/AWS::CloudFormation::StackSet.DeploymentTargets", - "markdownDescription": "The AWS `OrganizationalUnitIds` or `Accounts` for which to create stack instances in the specified Regions.", + "markdownDescription": "The AWS Organizations accounts or AWS accounts to deploy stacks to in the specified Regions.", "title": "DeploymentTargets" }, "ParameterOverrides": { @@ -62463,7 +62463,7 @@ "title": "OnPremConfig" }, "ServerHostname": { - "markdownDescription": "Specifies the DNS name or IP version 4 address of the NFS file server that your DataSync agent connects to.", + "markdownDescription": "Specifies the DNS name or IP address (IPv4 or IPv6) of the NFS file server that your DataSync agent connects to.", "title": "ServerHostname", "type": "string" }, @@ -62599,7 +62599,7 @@ "type": "string" }, "ServerHostname": { - "markdownDescription": "Specifies the domain name or IP version 4 (IPv4) address of the object storage server that your DataSync agent connects to.", + "markdownDescription": "Specifies the domain name or IP address (IPv4 or IPv6) of the object storage server that your DataSync agent connects to.", "title": "ServerHostname", "type": "string" }, @@ -62816,7 +62816,7 @@ "type": "string" }, "ServerHostname": { - "markdownDescription": "Specifies the domain name or IP address of the SMB file server that your DataSync agent connects to.\n\nRemember the following when configuring this parameter:\n\n- You can't specify an IP version 6 (IPv6) address.\n- If you're using Kerberos authentication, you must specify a domain name.", + "markdownDescription": "Specifies the domain name or IP address (IPv4 or IPv6) of the SMB file server that your DataSync agent connects to.\n\n> If you're using Kerberos authentication, you must specify a domain name.", "title": "ServerHostname", "type": "string" }, @@ -68374,7 +68374,7 @@ "type": "boolean" }, "InstanceCount": { - "markdownDescription": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 100 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 25 instances ( *25 * m5.xlarge = 100 vCPUs* ). \n\nValid range: 1 - 1000", + "markdownDescription": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 64 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 16 instances ( *16 * m5.xlarge = 64 vCPUs* ). 
\n\nValid range: 1 - 1000", "title": "InstanceCount", "type": "number" }, @@ -68389,7 +68389,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, and T instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, T, and G instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", "title": "InstanceType", "type": "string" }, @@ -72277,7 +72277,7 @@ "type": "string" }, "PreserveClientIp": { - "markdownDescription": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\nDefault: `false`", + "markdownDescription": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\n> `PreserveClientIp` is only supported on IPv4 EC2 Instance Connect Endpoints. To use `PreserveClientIp` , the value for `IpAddressType` must be `ipv4` . \n\nDefault: `false`", "title": "PreserveClientIp", "type": "boolean" }, @@ -83486,7 +83486,7 @@ }, "DeploymentController": { "$ref": "#/definitions/AWS::ECS::Service.DeploymentController", - "markdownDescription": "The deployment controller to use for the service. If no deployment controller is specified, the default value of `ECS` is used.", + "markdownDescription": "The deployment controller to use for the service.", "title": "DeploymentController" }, "DesiredCount": { @@ -83495,7 +83495,7 @@ "type": "number" }, "EnableECSManagedTags": { - "markdownDescription": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you need to set the `propagateTags` request parameter.", + "markdownDescription": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you must set the `propagateTags` request parameter.", "title": "EnableECSManagedTags", "type": "boolean" }, @@ -83752,7 +83752,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. 
The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.", + "markdownDescription": "The deployment controller type to use.\n\nThe deployment controller is the mechanism that determines how tasks are deployed for your service. The valid options are:\n\n- ECS\n\nWhen you create a service which uses the `ECS` deployment controller, you can choose between the following deployment strategies:\n\n- `ROLLING` : When you create a service which uses the *rolling update* ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.\n\nRolling update deployments are best suited for the following scenarios:\n\n- Gradual service updates: You need to update your service incrementally without taking the entire service offline at once.\n- Limited resource requirements: You want to avoid the additional resource costs of running two complete environments simultaneously (as required by blue/green deployments).\n- Acceptable deployment time: Your application can tolerate a longer deployment process, as rolling updates replace tasks one by one.\n- No need for instant roll back: Your service can tolerate a rollback process that takes minutes rather than seconds.\n- Simple deployment process: You prefer a straightforward deployment approach without the complexity of managing multiple environments, target groups, and listeners.\n- No load balancer requirement: Your service doesn't use or require a load balancer, Application Load Balancer , Network Load Balancer , or Service Connect (which are required for blue/green deployments).\n- Stateful applications: Your application maintains state that makes it difficult to run two parallel environments.\n- Cost sensitivity: You want to minimize deployment costs by not running duplicate environments during deployment.\n\nRolling updates are the default deployment strategy for services and provide a balance between deployment safety and resource efficiency for many common application scenarios.\n- `BLUE_GREEN` : A *blue/green* deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. 
This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.\n\nAmazon ECS blue/green deployments are best suited for the following scenarios:\n\n- Service validation: When you need to validate new service revisions before directing production traffic to them\n- Zero downtime: When your service requires zero-downtime deployments\n- Instant roll back: When you need the ability to quickly roll back if issues are detected\n- Load balancer requirement: When your service uses Application Load Balancer , Network Load Balancer , or Service Connect\n- External\n\nUse a third-party deployment controller.\n- Blue/green deployment (powered by CodeDeploy )\n\nCodeDeploy installs an updated version of the application as a new replacement task set and reroutes production traffic from the original application task set to the replacement task set. The original task set is terminated after a successful deployment. Use this deployment controller to verify a new deployment of a service before sending production traffic to it.\n\nWhen updating the deployment controller for a service, consider the following depending on the type of migration you're performing.\n\n- If you have a template that contains the `EXTERNAL` deployment controller information as well as `TaskSet` and `PrimaryTaskSet` resources, and you remove the task set resources from the template when updating from `EXTERNAL` to `ECS` , the `DescribeTaskSet` and `DeleteTaskSet` API calls will return a 400 error after the deployment controller is updated to `ECS` . This results in a delete failure on the task set resources, even though the stack transitions to `UPDATE_COMPLETE` status. For more information, see [Resource removed from stack but not deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-resource-removed-not-deleted) in the AWS CloudFormation User Guide. To fix this issue, delete the task sets directly using the Amazon ECS `DeleteTaskSet` API. For more information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference.\n- If you're migrating from `CODE_DEPLOY` to `ECS` with a new task definition and AWS CloudFormation performs a rollback operation, the Amazon ECS `UpdateService` request fails with the following error:\n\nResource handler returned message: \"Invalid request provided: Unable to update task definition on services with a CODE_DEPLOY deployment controller.\n- After a successful migration from `ECS` to `EXTERNAL` deployment controller, you need to manually remove the `ACTIVE` task set, because Amazon ECS no longer manages the deployment. For information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference.", "title": "Type", "type": "string" } @@ -130113,7 +130113,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", + "markdownDescription": "The namespace of the capability configuration. 
For example, if you configure OPC UA sources for an MQTT-enabled gateway, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:3` .", "title": "CapabilityNamespace", "type": "string" } @@ -163355,7 +163355,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags associated with the channel.", + "markdownDescription": "", "title": "Tags", "type": "array" } @@ -163787,7 +163787,7 @@ }, "FilterConfiguration": { "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.FilterConfiguration", - "markdownDescription": "", + "markdownDescription": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "title": "FilterConfiguration" }, "ManifestName": { @@ -163831,7 +163831,7 @@ }, "FilterConfiguration": { "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.FilterConfiguration", - "markdownDescription": "", + "markdownDescription": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "title": "FilterConfiguration" }, "ManifestName": { @@ -171432,7 +171432,7 @@ "type": "object" }, "StorageCapacity": { - "markdownDescription": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version.", + "markdownDescription": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version. The `storageCapacity` can be overwritten at run time. The storage capacity is not required for runs with a `DYNAMIC` storage type.", "title": "StorageCapacity", "type": "number" }, @@ -174262,7 +174262,7 @@ "type": "array" }, "ServiceRoleArn": { - "markdownDescription": "The service role that the AWS OpsWorks CM service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.", + "markdownDescription": "The service role that the AWS OpsWorks CM service backend uses to work with your account.", "title": "ServiceRoleArn", "type": "string" }, @@ -224056,7 +224056,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Specifies from which source accounts the service principal has access to the resources in this resource share.", "title": "Sources", "type": "array" }, @@ -224501,7 +224501,7 @@ "type": "string" }, "SourceDBClusterIdentifier": { - "markdownDescription": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n- Cannot be specified if `SourceDbClusterResourceId` is specified. 
You must specify either `SourceDBClusterIdentifier` or `SourceDbClusterResourceId` , but not both.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "title": "SourceDBClusterIdentifier", "type": "string" }, @@ -235256,7 +235256,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.InventoryConfiguration" }, - "markdownDescription": "Specifies the inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", + "markdownDescription": "Specifies the S3 Inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", "title": "InventoryConfigurations", "type": "array" }, @@ -240908,7 +240908,7 @@ "type": "string" }, "InstanceId": { - "markdownDescription": "The ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.\n\n> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.", + "markdownDescription": "> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.\n> \n> Note that in some examples later in this page, `InstanceIds` is used as the tag-key name in a `Targets` filter. `InstanceId` is not used as a parameter. \n\nThe ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.", "title": "InstanceId", "type": "string" }, @@ -241499,7 +241499,7 @@ }, "LoggingInfo": { "$ref": "#/definitions/AWS::SSM::MaintenanceWindowTask.LoggingInfo", - "markdownDescription": "Information about an Amazon S3 bucket to write Run Command task-level logs to.\n\n> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS ::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) .", + "markdownDescription": "> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. 
For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS ::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) . \n\nInformation about an Amazon S3 bucket to write Run Command task-level logs to.", "title": "LoggingInfo" }, "MaxConcurrency": { @@ -241546,7 +241546,7 @@ "title": "TaskInvocationParameters" }, "TaskParameters": { - "markdownDescription": "The parameters to pass to the task when it runs.\n\n> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) .", + "markdownDescription": "> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) . \n\nThe parameters to pass to the task when it runs.", "title": "TaskParameters", "type": "object" }, @@ -241872,7 +241872,7 @@ "type": "string" }, "Name": { - "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", + "markdownDescription": "The name of the parameter.\n\n> The reported maximum length of 2048 characters for a parameter name includes 1037 characters that are reserved for internal use by Systems Manager . The maximum length for a parameter name that you specify is 1011 characters.\n> \n> This count of 1011 characters includes the characters in the ARN that precede the name you specify. This ARN length will vary depending on your partition and Region. For example, the following 45 characters count toward the 1011 character maximum for a parameter created in the US East (Ohio) Region: `arn:aws:ssm:us-east-2:111122223333:parameter/` .", "title": "Name", "type": "string" }, @@ -242119,7 +242119,7 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "The value of the yum repo configuration. 
For example:\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\n> For information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) .", + "markdownDescription": "The value of the repo configuration.\n\n*Example for yum repositories*\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\nFor information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) on the *man7.org* website.\n\n*Examples for Ubuntu Server and Debian Server*\n\n`deb http://security.ubuntu.com/ubuntu jammy main`\n\n`deb https://site.example.com/debian distribution component1 component2 component3`\n\nRepo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see [jammy (5) sources.list.5.gz](https://docs.aws.amazon.com/https://manpages.ubuntu.com/manpages/jammy/man5/sources.list.5.html) on the *Ubuntu Server Manuals* website and [sources.list format](https://docs.aws.amazon.com/https://wiki.debian.org/SourcesList#sources.list_format) on the *Debian Wiki* .", "title": "Configuration", "type": "string" }, @@ -263060,7 +263060,7 @@ }, "S3StorageOptions": { "$ref": "#/definitions/AWS::Transfer::Server.S3StorageOptions", - "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", + "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized.\n\n- If using the console, this is enabled by default.\n- If using the API or CLI, this is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", "title": "S3StorageOptions" }, "SecurityPolicyName": { @@ -263226,7 +263226,7 @@ "additionalProperties": false, "properties": { "DirectoryListingOptimization": { - "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . 
If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", "title": "DirectoryListingOptimization", "type": "string" } diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 47eb3081d..ac389f536 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -205,31 +205,31 @@ "SourceAccount": "The ID of the account that assigned the permission." }, "AWS::AIOps::InvestigationGroup": { - "ChatbotNotificationChannels": "Use this property to integrate Amazon Q Developer operational investigations with Amazon Q in chat applications. This property is an array. For the first string, specify the ARN of an Amazon SNS topic. For the array of strings, specify the ARNs of one or more Amazon Q in chat applications configurations that you want to associate with that topic. For more information about these configuration ARNs, see [Getting started with Amazon Q in chat applications](https://docs.aws.amazon.com/chatbot/latest/adminguide/getting-started.html) and [Resource type defined by AWS Chatbot](https://docs.aws.amazon.com/service-authorization/latest/reference/list_awschatbot.html#awschatbot-resources-for-iam-policies) .", - "CrossAccountConfigurations": "", - "EncryptionConfig": "Use this property to specify a customer managed AWS KMS key to encrypt your investigation data. If you omit this property, Amazon Q Developer operational investigations will use an AWS key to encrypt the data. For more information, see [Encryption of investigation data](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Investigations-Security.html#Investigations-KMS) .", - "InvestigationGroupPolicy": "", - "IsCloudTrailEventHistoryEnabled": "Specify `true` to enable Amazon Q Developer operational investigations to have access to change events that are recorded by CloudTrail . The default is `true` .", - "Name": "A name for the investigation group.", - "RetentionInDays": "Specify how long that investigation data is kept. For more information, see [Operational investigation data retention](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Investigations-Retention.html) .\n\nIf you omit this parameter, the default of 90 days is used.", - "RoleArn": "Specify the ARN of the IAM role that Amazon Q Developer operational investigations will use when it gathers investigation data. The permissions in this role determine which of your resources that Amazon Q Developer operational investigations will have access to during investigations.\n\nFor more information, see [How to control what data Amazon Q has access to during investigations](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Investigations-Security.html#Investigations-Security-Data) .", - "TagKeyBoundaries": "Enter the existing custom tag keys for custom applications in your system. Resource tags help Amazon Q narrow the search space when it is unable to discover definite relationships between resources. For example, to discover that an Amazon ECS service depends on an Amazon RDS database, Amazon Q can discover this relationship using data sources such as X-Ray and CloudWatch Application Signals. However, if you haven't deployed these features, Amazon Q will attempt to identify possible relationships. 
Tag boundaries can be used to narrow the resources that will be discovered by Amazon Q in these cases.\n\nYou don't need to enter tags created by myApplications or AWS CloudFormation , because Amazon Q can automatically detect those tags.", - "Tags": "A list of key-value pairs to associate with the investigation group. You can associate as many as 50 tags with an investigation group.\n\nTags can help you organize and categorize your resources." + "ChatbotNotificationChannels": "Use this property to integrate CloudWatch investigations with chat applications. This property is an array. For the first string, specify the ARN of an Amazon SNS topic. For the array of strings, specify the ARNs of one or more chat applications configurations that you want to associate with that topic. For more information about these configuration ARNs, see [Getting started with Amazon Q in chat applications](https://docs.aws.amazon.com/chatbot/latest/adminguide/getting-started.html) and [Resource type defined by AWS Chatbot](https://docs.aws.amazon.com/service-authorization/latest/reference/list_awschatbot.html#awschatbot-resources-for-iam-policies) .", + "CrossAccountConfigurations": "List of `sourceRoleArn` values that have been configured for cross-account access.", + "EncryptionConfig": "Specifies the customer managed AWS KMS key that the investigation group uses to encrypt data, if there is one. If not, the investigation group uses an AWS key to encrypt the data.", + "InvestigationGroupPolicy": "Returns the JSON of the IAM resource policy associated with the specified investigation group in a string. For example, `{\\\"Version\\\":\\\"2012-10-17\\\",\\\"Statement\\\":[{\\\"Effect\\\":\\\"Allow\\\",\\\"Principal\\\":{\\\"Service\\\":\\\"aiops.alarms.cloudwatch.amazonaws.com\\\"},\\\"Action\\\":[\\\"aiops:CreateInvestigation\\\",\\\"aiops:CreateInvestigationEvent\\\"],\\\"Resource\\\":\\\"*\\\",\\\"Condition\\\":{\\\"StringEquals\\\":{\\\"aws:SourceAccount\\\":\\\"111122223333\\\"},\\\"ArnLike\\\":{\\\"aws:SourceArn\\\":\\\"arn:aws:cloudwatch:us-east-1:111122223333:alarm:*\\\"}}}]}` .", + "IsCloudTrailEventHistoryEnabled": "Specify `true` to enable CloudWatch investigations to have access to change events that are recorded by CloudTrail. The default is `true` .", + "Name": "Specify either the name or the ARN of the investigation group that you want to view. This is used to set the name of the investigation group.", + "RetentionInDays": "Specifies how long that investigation data is kept.", + "RoleArn": "The ARN of the IAM role that the investigation group uses for permissions to gather data.", + "TagKeyBoundaries": "Displays the custom tag keys for custom applications in your system that you have specified in the investigation group. Resource tags help CloudWatch investigations narrow the search space when it is unable to discover definite relationships between resources.", + "Tags": "The list of key-value pairs to associate with the resource." }, "AWS::AIOps::InvestigationGroup ChatbotNotificationChannel": { - "ChatConfigurationArns": "", - "SNSTopicArn": "" + "ChatConfigurationArns": "Returns the Amazon Resource Name (ARN) of any third-party chat integrations configured for the account.", + "SNSTopicArn": "Returns the ARN of an Amazon SNS topic used for third-party chat integrations." }, "AWS::AIOps::InvestigationGroup CrossAccountConfiguration": { - "SourceRoleArn": "" + "SourceRoleArn": "The ARN of an existing role which will be used to do investigations on your behalf." 
}, "AWS::AIOps::InvestigationGroup EncryptionConfigMap": { - "EncryptionConfigurationType": "", + "EncryptionConfigurationType": "Displays whether investigation data is encrypted by a customer managed key or an AWS owned key.", "KmsKeyId": "If the investigation group uses a customer managed key for encryption, this field displays the ID of that key." }, "AWS::AIOps::InvestigationGroup Tag": { - "Key": "A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources, including investigation groups.", - "Value": "The value for the specified tag key." + "Key": "Assigns one or more tags (key-value pairs) to the specified resource.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.\n\nTags don't have any semantic meaning to AWS and are interpreted strictly as strings of characters.\n\nYou can associate as many as 50 tags with a resource.", + "Value": "A list of key-value pairs to associate with the investigation group. You can associate as many as 50 tags with an investigation group. To be able to associate tags when you create the investigation group, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values." }, "AWS::APS::RuleGroupsNamespace": { "Data": "The rules file used in the namespace.\n\nFor more details about the rules file, see [Creating a rules file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-ruler-rulesfile.html) in the *Amazon Managed Service for Prometheus User Guide* .", @@ -443,7 +443,6 @@ "AWS::AmazonMQ::Broker User": { "ConsoleAccess": "Enables access to the ActiveMQ web console for the ActiveMQ user. Does not apply to RabbitMQ brokers.", "Groups": "The list of groups (20 maximum) to which the ActiveMQ user belongs. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 2-100 characters long. Does not apply to RabbitMQ brokers.", - "JolokiaApiAccess": "", "Password": "The password of the user. This value must be at least 12 characters long, must contain at least 4 unique characters, and must not contain commas, colons, or equal signs (,:=).", "ReplicationUser": "Defines if this user is intended for CRDR replication purposes.", "Username": "The username of the broker user. For Amazon MQ for ActiveMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). For Amazon MQ for RabbitMQ brokers, this value can contain only alphanumeric characters, dashes, periods, underscores (- . _). This value must not contain a tilde (~) character. Amazon MQ prohibits using guest as a valid username. This value must be 2-100 characters long.\n\n> Do not add personally identifiable information (PII) or other confidential or sensitive information in broker usernames. Broker usernames are accessible to other AWS services, including CloudWatch Logs . Broker usernames are not intended to be used for private or sensitive data." @@ -2971,8 +2970,8 @@ "TagItems": "The items of the tag." }, "AWS::AppStream::AppBlock TagItems": { - "Key": "", - "Value": "" + "Key": "The key of the tag items.", + "Value": "The value of the tag items." 
}, "AWS::AppStream::AppBlockBuilder": { "AccessEndpoints": "The access endpoints of the app block builder.", @@ -3021,8 +3020,8 @@ "TagItems": "The items of the tag." }, "AWS::AppStream::Application TagItems": { - "Key": "", - "Value": "" + "Key": "The key of the tag items.", + "Value": "The value of the tag items." }, "AWS::AppStream::ApplicationEntitlementAssociation": { "ApplicationIdentifier": "The identifier of the application.", @@ -3070,7 +3069,7 @@ "IdleDisconnectTimeoutInSeconds": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", "ImageArn": "The ARN of the public, private, or shared image to use.", "ImageName": "The name of the image used to create the fleet.", - "InstanceType": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "InstanceType": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "MaxConcurrentSessions": "The maximum number of concurrent sessions that can be run on an Elastic fleet. This setting is required for Elastic fleets, but is not used for other fleet types.", "MaxSessionsPerInstance": "Max number of user sessions on an instance. This is applicable only for multi-session fleets.", "MaxUserDurationInSeconds": "The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.\n\nSpecify a value between 600 and 432000.", @@ -3112,7 +3111,7 @@ "IamRoleArn": "The ARN of the IAM role that is applied to the image builder. To assume a role, the image builder calls the AWS Security Token Service `AssumeRole` API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. AppStream 2.0 retrieves the temporary credentials and creates the *appstream_machine_role* credential profile on the instance.\n\nFor more information, see [Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances](https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) in the *Amazon AppStream 2.0 Administration Guide* .", "ImageArn": "The ARN of the public, private, or shared image to use.", "ImageName": "The name of the image used to create the image builder.", - "InstanceType": "The instance type to use when launching the image builder. 
The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge", + "InstanceType": "The instance type to use when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge", "Name": "A unique name for the image builder.", "Tags": "An array of key-value pairs.", "VpcConfig": "The VPC configuration for the image builder. You can specify only one subnet." @@ -5852,7 +5851,7 @@ "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." }, "AWS::Bedrock::Flow FieldForReranking": { - "FieldName": "The name of a metadata field to include in or exclude from consideration when reranking." + "FieldName": "The name of the metadata field to be used during the reranking process." }, "AWS::Bedrock::Flow FlowCondition": { "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. 
For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", @@ -5960,8 +5959,8 @@ "Definition": "The definition of the DoWhile loop nodes and connections between nodes in the flow." }, "AWS::Bedrock::Flow MetadataConfigurationForReranking": { - "SelectionMode": "Specifies whether to consider all metadata when reranking, or only the metadata that you select. If you specify `SELECTIVE` , include the `selectiveModeConfiguration` field.", - "SelectiveModeConfiguration": "Contains configurations for the metadata fields to include or exclude when considering reranking." + "SelectionMode": "The mode for selecting which metadata fields to include in the reranking process. Valid values are ALL (use all available metadata fields) or SELECTIVE (use only specified fields).", + "SelectiveModeConfiguration": "Configuration for selective mode, which allows you to explicitly include or exclude specific metadata fields during reranking. This is only used when selectionMode is set to SELECTIVE." }, "AWS::Bedrock::Flow PerformanceConfiguration": { "Latency": "To use a latency-optimized version of the model, set to `optimized` ." @@ -5999,8 +5998,8 @@ "Text": "Contains configurations for the text in a message for a prompt." }, "AWS::Bedrock::Flow RerankingMetadataSelectiveModeConfiguration": { - "FieldsToExclude": "An array of objects, each of which specifies a metadata field to exclude from consideration when reranking.", - "FieldsToInclude": "An array of objects, each of which specifies a metadata field to include in consideration when reranking. The remaining metadata fields are ignored." + "FieldsToExclude": "A list of metadata field names to explicitly exclude from the reranking process. All metadata fields except these will be considered when reordering search results. This parameter cannot be used together with fieldsToInclude.", + "FieldsToInclude": "A list of metadata field names to explicitly include in the reranking process. Only these fields will be considered when reordering search results. This parameter cannot be used together with fieldsToExclude." }, "AWS::Bedrock::Flow RetrievalFlowNodeConfiguration": { "ServiceConfiguration": "Contains configurations for the service to use for retrieving data to return as the output from the node." @@ -6030,17 +6029,17 @@ "Text": "The message for the prompt." }, "AWS::Bedrock::Flow VectorSearchBedrockRerankingConfiguration": { - "MetadataConfiguration": "Contains configurations for the metadata to use in reranking.", - "ModelConfiguration": "Contains configurations for the reranker model.", - "NumberOfRerankedResults": "The number of results to return after reranking." + "MetadataConfiguration": "Configuration for how document metadata should be used during the reranking process. This determines which metadata fields are included when reordering search results.", + "ModelConfiguration": "Configuration for the Amazon Bedrock foundation model used for reranking. This includes the model ARN and any additional request fields required by the model.", + "NumberOfRerankedResults": "The maximum number of results to rerank. This limits how many of the initial vector search results will be processed by the reranking model. A smaller number improves performance but may exclude potentially relevant results." 
}, "AWS::Bedrock::Flow VectorSearchBedrockRerankingModelConfiguration": { - "AdditionalModelRequestFields": "A JSON object whose keys are request fields for the model and whose values are values for those fields.", - "ModelArn": "The ARN of the reranker model to use." + "AdditionalModelRequestFields": "A list of additional fields to include in the model request during reranking. These fields provide extra context or configuration options specific to the selected foundation model.", + "ModelArn": "The Amazon Resource Name (ARN) of the foundation model to use for reranking. This model processes the query and search results to determine a more relevant ordering." }, "AWS::Bedrock::Flow VectorSearchRerankingConfiguration": { - "BedrockRerankingConfiguration": "Contains configurations for an Amazon Bedrock reranker model.", - "Type": "The type of reranker model." + "BedrockRerankingConfiguration": "Configuration for using Amazon Bedrock foundation models to rerank search results. This is required when the reranking type is set to BEDROCK.", + "Type": "The type of reranking to apply to vector search results. Currently, the only supported value is BEDROCK, which uses Amazon Bedrock foundation models for reranking." }, "AWS::Bedrock::FlowAlias": { "ConcurrencyConfiguration": "The configuration that specifies how nodes in the flow are executed concurrently.", @@ -6068,7 +6067,7 @@ "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." }, "AWS::Bedrock::FlowVersion FieldForReranking": { - "FieldName": "The name of a metadata field to include in or exclude from consideration when reranking." + "FieldName": "The name of the metadata field to be used during the reranking process." }, "AWS::Bedrock::FlowVersion FlowCondition": { "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", @@ -6172,8 +6171,8 @@ "Definition": "The definition of the DoWhile loop nodes and connections between nodes in the flow." }, "AWS::Bedrock::FlowVersion MetadataConfigurationForReranking": { - "SelectionMode": "Specifies whether to consider all metadata when reranking, or only the metadata that you select. If you specify `SELECTIVE` , include the `selectiveModeConfiguration` field.", - "SelectiveModeConfiguration": "Contains configurations for the metadata fields to include or exclude when considering reranking." + "SelectionMode": "The mode for selecting which metadata fields to include in the reranking process. Valid values are ALL (use all available metadata fields) or SELECTIVE (use only specified fields).", + "SelectiveModeConfiguration": "Configuration for selective mode, which allows you to explicitly include or exclude specific metadata fields during reranking. This is only used when selectionMode is set to SELECTIVE." }, "AWS::Bedrock::FlowVersion PerformanceConfiguration": { "Latency": "To use a latency-optimized version of the model, set to `optimized` ." @@ -6211,8 +6210,8 @@ "Text": "Contains configurations for the text in a message for a prompt." 
}, "AWS::Bedrock::FlowVersion RerankingMetadataSelectiveModeConfiguration": { - "FieldsToExclude": "An array of objects, each of which specifies a metadata field to exclude from consideration when reranking.", - "FieldsToInclude": "An array of objects, each of which specifies a metadata field to include in consideration when reranking. The remaining metadata fields are ignored." + "FieldsToExclude": "A list of metadata field names to explicitly exclude from the reranking process. All metadata fields except these will be considered when reordering search results. This parameter cannot be used together with fieldsToInclude.", + "FieldsToInclude": "A list of metadata field names to explicitly include in the reranking process. Only these fields will be considered when reordering search results. This parameter cannot be used together with fieldsToExclude." }, "AWS::Bedrock::FlowVersion RetrievalFlowNodeConfiguration": { "ServiceConfiguration": "Contains configurations for the service to use for retrieving data to return as the output from the node." @@ -6237,17 +6236,17 @@ "Text": "The message for the prompt." }, "AWS::Bedrock::FlowVersion VectorSearchBedrockRerankingConfiguration": { - "MetadataConfiguration": "Contains configurations for the metadata to use in reranking.", - "ModelConfiguration": "Contains configurations for the reranker model.", - "NumberOfRerankedResults": "The number of results to return after reranking." + "MetadataConfiguration": "Configuration for how document metadata should be used during the reranking process. This determines which metadata fields are included when reordering search results.", + "ModelConfiguration": "Configuration for the Amazon Bedrock foundation model used for reranking. This includes the model ARN and any additional request fields required by the model.", + "NumberOfRerankedResults": "The maximum number of results to rerank. This limits how many of the initial vector search results will be processed by the reranking model. A smaller number improves performance but may exclude potentially relevant results." }, "AWS::Bedrock::FlowVersion VectorSearchBedrockRerankingModelConfiguration": { - "AdditionalModelRequestFields": "A JSON object whose keys are request fields for the model and whose values are values for those fields.", - "ModelArn": "The ARN of the reranker model to use." + "AdditionalModelRequestFields": "A list of additional fields to include in the model request during reranking. These fields provide extra context or configuration options specific to the selected foundation model.", + "ModelArn": "The Amazon Resource Name (ARN) of the foundation model to use for reranking. This model processes the query and search results to determine a more relevant ordering." }, "AWS::Bedrock::FlowVersion VectorSearchRerankingConfiguration": { - "BedrockRerankingConfiguration": "Contains configurations for an Amazon Bedrock reranker model.", - "Type": "The type of reranker model." + "BedrockRerankingConfiguration": "Configuration for using Amazon Bedrock foundation models to rerank search results. This is required when the reranking type is set to BEDROCK.", + "Type": "The type of reranking to apply to vector search results. Currently, the only supported value is BEDROCK, which uses Amazon Bedrock foundation models for reranking." 
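Because the reranking structures for `AWS::Bedrock::Flow` and `AWS::Bedrock::FlowVersion` are described piecemeal above, a worked fragment may help reviewers sanity-check how the pieces nest. This is a minimal illustrative sketch only: the outer `RerankingConfiguration` key, the metadata field names, and the model ARN are assumptions for the example, not values taken from this schema update:

```json
"RerankingConfiguration": {
  "Type": "BEDROCK",
  "BedrockRerankingConfiguration": {
    "ModelConfiguration": {
      "ModelArn": "arn:aws:bedrock:us-east-1::foundation-model/amazon.rerank-v1:0"
    },
    "NumberOfRerankedResults": 5,
    "MetadataConfiguration": {
      "SelectionMode": "SELECTIVE",
      "SelectiveModeConfiguration": {
        "FieldsToInclude": [
          { "FieldName": "department" },
          { "FieldName": "doc_type" }
        ]
      }
    }
  }
}
```

Per the descriptions above, `SelectiveModeConfiguration` is consulted only when `SelectionMode` is `SELECTIVE`, and `FieldsToInclude` and `FieldsToExclude` are mutually exclusive, so a fragment would carry one or the other, never both.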
}, "AWS::Bedrock::Guardrail": { "BlockedInputMessaging": "The message to return when the guardrail blocks a prompt.", @@ -6643,7 +6642,7 @@ "AWS::Bedrock::Prompt ToolChoice": { "Any": "The model must request at least one tool (no text is generated).", "Auto": "(Default). The Model automatically decides if a tool should be called or whether to generate text instead.", - "Tool": "The Model must request the specified tool. Only supported by Anthropic Claude 3 models." + "Tool": "The Model must request the specified tool. Only supported by Anthropic Claude 3 and Amazon Nova models." }, "AWS::Bedrock::Prompt ToolConfiguration": { "ToolChoice": "If supported by model, forces the model to request a tool.", @@ -6734,7 +6733,7 @@ "AWS::Bedrock::PromptVersion ToolChoice": { "Any": "The model must request at least one tool (no text is generated).", "Auto": "(Default). The Model automatically decides if a tool should be called or whether to generate text instead.", - "Tool": "The Model must request the specified tool. Only supported by Anthropic Claude 3 models." + "Tool": "The Model must request the specified tool. Only supported by Anthropic Claude 3 and Amazon Nova models." }, "AWS::Bedrock::PromptVersion ToolConfiguration": { "ToolChoice": "If supported by model, forces the model to request a tool.", @@ -6748,6 +6747,29 @@ "InputSchema": "The input schema for the tool in JSON format.", "Name": "The name for the tool." }, + "AWS::Billing::BillingView": { + "DataFilterExpression": "See [Expression](https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_billing_Expression.html) . Billing view only supports `LINKED_ACCOUNT` and `Tags` .", + "Description": "The description of the billing view.", + "Name": "The name of the billing view.", + "SourceViews": "A list of billing views used as the data source for the custom billing view.", + "Tags": "A list of key-value maps specifying the tags associated with the billing view being created." + }, + "AWS::Billing::BillingView DataFilterExpression": { + "Dimensions": "The specific `Dimension` to use for `Expression` .", + "Tags": "The specific `Tag` to use for `Expression` ." + }, + "AWS::Billing::BillingView Dimensions": { + "Key": "The key that's associated with the dimension.", + "Values": "The metadata that you can use to filter and group your results." + }, + "AWS::Billing::BillingView Tag": { + "Key": "The key that's associated with the tag.", + "Value": "The value that's associated with the tag." + }, + "AWS::Billing::BillingView Tags": { + "Key": "The key that's associated with the tag.", + "Values": "The metadata values that you can use to filter and group your results." + }, "AWS::BillingConductor::BillingGroup": { "AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", "ComputationPreference": "The preferences and settings that will be used to compute the AWS charges for a billing group.", @@ -7119,10 +7141 }, "AWS::CertificateManager::Certificate": { "CertificateAuthorityArn": "The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. 
For more information about private CAs, see the [AWS Private Certificate Authority](https://docs.aws.amazon.com/privateca/latest/userguide/PcaWelcome.html) user guide. The ARN must have the following form:\n\n`arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012`", + "CertificateExport": "You can opt out of allowing export of your certificate by specifying the `DISABLED` option. Allow export of your certificate by specifying the `ENABLED` option.\n\nIf you do not specify an export preference in a new CloudFormation template, it is the same as explicitly denying export of your certificate.", "CertificateTransparencyLoggingPreference": "You can opt out of certificate transparency logging by specifying the `DISABLED` option. Opt in by specifying `ENABLED` .\n\nIf you do not specify a certificate transparency logging preference on a new CloudFormation template, or if you remove the logging preference from an existing template, this is the same as explicitly enabling the preference.\n\nChanging the certificate transparency logging preference will update the existing resource by calling `UpdateCertificateOptions` on the certificate. This action will not create a new resource.", "DomainName": "The fully qualified domain name (FQDN), such as www.example.com, with which you want to secure an ACM certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, `*.example.com` protects `www.example.com` , `site.example.com` , and `images.example.com.`", "DomainValidationOptions": "Domain information that domain name registrars use to verify your identity.\n\n> In order for a AWS::CertificateManager::Certificate to be provisioned and validated in CloudFormation automatically, the `DomainName` property needs to be identical to one of the `DomainName` property supplied in DomainValidationOptions, if the ValidationMethod is **DNS**. Failing to keep them like-for-like will result in failure to create the domain validation records in Route53.", - "KeyAlgorithm": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html#algorithms) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", + "KeyAlgorithm": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. 
Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate-characteristics.html#algorithms-term) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", "SubjectAlternativeNames": "Additional FQDNs to be included in the Subject Alternative Name extension of the ACM certificate. For example, you can add www.example.net to a certificate for which the `DomainName` field is www.example.com if users can reach your site by using either name.", "Tags": "Key-value pairs that can identify the certificate.", "ValidationMethod": "The method you want to use to validate that you own or control the domain associated with a public certificate. You can [validate with DNS](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html) or [validate with email](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-email.html) . We recommend that you use DNS validation.\n\nIf not specified, this property defaults to email validation." @@ -7245,7 +7268,7 @@ "Value": "The value of the tag." }, "AWS::CleanRooms::Collaboration": { - "AnalyticsEngine": "The analytics engine for the collaboration.", + "AnalyticsEngine": "The analytics engine for the collaboration.\n\n> After July 16, 2025, the `CLEAN_ROOMS_SQL` parameter will no longer be available.", "CreatorDisplayName": "A display name of the collaboration creator.", "CreatorMLMemberAbilities": "The ML member abilities for a collaboration member.", "CreatorMemberAbilities": "The abilities granted to the collaboration creator.\n\n*Allowed values* `CAN_QUERY` | `CAN_RECEIVE_RESULTS` | `CAN_RUN_JOB`", @@ -7795,13 +7818,13 @@ "RetainStacksOnAccountRemoval": "If set to `true` , stack resources are retained when an account is removed from a target organization or OU. If set to `false` , stack resources are deleted. Specify only if `Enabled` is set to `True` ." }, "AWS::CloudFormation::StackSet DeploymentTargets": { - "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", + "AccountFilterType": "Refines which accounts to deploy stacks to by specifying how to use the `Accounts` and `OrganizationalUnitIds` properties together.\n\nThe following values determine how CloudFormation selects target accounts:\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` property.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` property.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` property. `UNION` is not supported for create operations when using StackSet as a resource or the `CreateStackInstances` API.", "Accounts": "The account IDs of the AWS accounts . If you have many account numbers, you can provide those accounts using the `AccountsUrl` property instead.\n\n*Pattern* : `^[0-9]{12}$`", - "AccountsUrl": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. There is currently a 10MB limit for the data (approximately 800,000 accounts).", + "AccountsUrl": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. There is currently a 10MB limit for the data (approximately 800,000 accounts).\n\nThis property serves the same purpose as `Accounts` but allows you to specify a large number of accounts.", "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" }, "AWS::CloudFormation::StackSet ManagedExecution": { - "Active": "When `true` , StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order.\n\n> If there are already running or queued operations, StackSets queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your StackSet's execution configuration while there are running or queued operations for that StackSet. \n\nWhen `false` (default), StackSets performs one operation at a time in request order." + "Active": "When `true` , CloudFormation performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, CloudFormation starts queued operations in request order.\n\n> If there are already running or queued operations, CloudFormation queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your StackSet's execution configuration while there are running or queued operations for that StackSet. \n\nWhen `false` (default), StackSets performs one operation at a time in request order." }, "AWS::CloudFormation::StackSet OperationPreferences": { "ConcurrencyMode": "Specifies how the concurrency level behaves during the operation execution.\n\n- `STRICT_FAILURE_TOLERANCE` : This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of `FailureToleranceCount` +1. The initial actual concurrency is set to the lower of either the value of the `MaxConcurrentCount` , or the value of `FailureToleranceCount` +1. The actual concurrency is then reduced proportionally by the number of failures. 
This is the default behavior.\n\nIf failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar.\n- `SOFT_FAILURE_TOLERANCE` : This option decouples `FailureToleranceCount` from the actual concurrency. This allows StackSet operations to run at the concurrency level set by the `MaxConcurrentCount` value, or `MaxConcurrentPercentage` , regardless of the number of failures.", @@ -7817,7 +7840,7 @@ "ParameterValue": "The input value associated with the parameter." }, "AWS::CloudFormation::StackSet StackInstances": { - "DeploymentTargets": "The AWS `OrganizationalUnitIds` or `Accounts` for which to create stack instances in the specified Regions.", + "DeploymentTargets": "The AWS Organizations accounts or AWS accounts to deploy stacks to in the specified Regions.", "ParameterOverrides": "A list of StackSet parameters whose values you want to override in the selected stack instances.", "Regions": "The names of one or more Regions where you want to create stack instances using the specified AWS accounts ." }, @@ -8027,7 +8050,7 @@ "CNAMEs": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "CacheBehaviors": "A complex type that contains zero or more `CacheBehavior` elements.", "Comment": "A comment to describe the distribution. The comment cannot be longer than 128 characters.", - "ConnectionMode": "This field specifies whether the connection mode is through a standard distribution (direct) or a multi-tenant distribution with distribution tenants(tenant-only).", + "ConnectionMode": "This field specifies whether the connection mode is through a standard distribution (direct) or a multi-tenant distribution with distribution tenants (tenant-only).", "ContinuousDeploymentPolicyId": "> This field only supports standard distributions. You can't specify this field for multi-tenant distributions. For more information, see [Unsupported features for SaaS Manager for Amazon CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-config-options.html#unsupported-saas) in the *Amazon CloudFront Developer Guide* . \n\nThe identifier of a continuous deployment policy. For more information, see `CreateContinuousDeploymentPolicy` .", "CustomErrorResponses": "A complex type that controls the following:\n\n- Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n- How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n\nFor more information about custom error pages, see [Customizing Error Responses](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) in the *Amazon CloudFront Developer Guide* .", "CustomOrigin": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. 
We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", @@ -10397,7 +10420,12 @@ "ContactLens": "", "ContactflowLogs": "", "EarlyMedia": "", + "EnhancedChatMonitoring": "", + "EnhancedContactMonitoring": "", + "HighVolumeOutBound": "", "InboundCalls": "", + "MultiPartyChatConference": "", + "MultiPartyConference": "", "OutboundCalls": "", "UseCustomTTSVoices": "" }, @@ -12758,7 +12786,7 @@ "AWS::DataSync::LocationNFS": { "MountOptions": "Specifies the options that DataSync can use to mount your NFS file server.", "OnPremConfig": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", - "ServerHostname": "Specifies the DNS name or IP version 4 address of the NFS file server that your DataSync agent connects to.", + "ServerHostname": "Specifies the DNS name or IP address (IPv4 or IPv6) of the NFS file server that your DataSync agent connects to.", "Subdirectory": "Specifies the export path in your NFS file server that you want DataSync to mount.\n\nThis path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see [Accessing NFS file servers](https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#accessing-nfs) .", "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location." }, @@ -12780,7 +12808,7 @@ "CustomSecretConfig": "Specifies configuration information for a customer-managed Secrets Manager secret where the secret key for a specific object storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.\n\n> You can use either `CmkSecretConfig` (with `SecretKey` ) or `CustomSecretConfig` (without `SecretKey` ) to provide credentials for a `CreateLocationObjectStorage` request. Do not provide both parameters for the same request.", "SecretKey": "Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server.\n\n> If you provide a secret using `SecretKey` , but do not provide secret configuration details using `CmkSecretConfig` or `CustomSecretConfig` , then DataSync stores the token using your AWS account's Secrets Manager secret.", "ServerCertificate": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). 
The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", - "ServerHostname": "Specifies the domain name or IP version 4 (IPv4) address of the object storage server that your DataSync agent connects to.", + "ServerHostname": "Specifies the domain name or IP address (IPv4 or IPv6) of the object storage server that your DataSync agent connects to.", "ServerPort": "Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443).", "ServerProtocol": "Specifies the protocol that your object storage server uses to communicate. If not specified, the default value is `HTTPS` .", "Subdirectory": "Specifies the object prefix for your object storage server. If this is a source location, DataSync only copies objects with this prefix. If this is a destination location, DataSync writes all objects with this prefix.", @@ -12825,7 +12853,7 @@ "KerberosPrincipal": "", "MountOptions": "Specifies the version of the SMB protocol that DataSync uses to access your SMB file server.", "Password": "Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if `AuthenticationType` is set to `NTLM` .", - "ServerHostname": "Specifies the domain name or IP address of the SMB file server that your DataSync agent connects to.\n\nRemember the following when configuring this parameter:\n\n- You can't specify an IP version 6 (IPv6) address.\n- If you're using Kerberos authentication, you must specify a domain name.", + "ServerHostname": "Specifies the domain name or IP address (IPv4 or IPv6) of the SMB file server that your DataSync agent connects to.\n\n> If you're using Kerberos authentication, you must specify a domain name.", "Subdirectory": "Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, `/path/to/subdirectory` ). Make sure that other SMB clients in your network can also mount this path.\n\nTo copy all data in the subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see [Providing DataSync access to SMB file servers](https://docs.aws.amazon.com/datasync/latest/userguide/create-smb-location.html#configuring-smb-permissions) .", "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location.", "User": "Specifies the user that can mount and access the files, folders, and file metadata in your SMB file server. This parameter applies only if `AuthenticationType` is set to `NTLM` .\n\nFor information about choosing a user with the right level of access for your transfer, see [Providing DataSync access to SMB file servers](https://docs.aws.amazon.com/datasync/latest/userguide/create-smb-location.html#configuring-smb-permissions) ." @@ -13712,11 +13740,13 @@ "DeletionProtection": "Protects clusters from being accidentally deleted. If enabled, the cluster cannot be deleted unless it is modified and `DeletionProtection` is disabled.", "EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to Amazon CloudWatch Logs. 
You can enable audit logs or profiler logs. For more information, see [Auditing Amazon DocumentDB Events](https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html) and [Profiling Amazon DocumentDB Operations](https://docs.aws.amazon.com/documentdb/latest/developerguide/profiling.html) .", "EngineVersion": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.\n\nIf you intend to trigger an in-place upgrade, please refer to [Amazon DocumentDB in-place major version upgrade](https://docs.aws.amazon.com/documentdb/latest/developerguide/docdb-mvu.html) . Note that for an in-place engine version upgrade, you need to remove other cluster properties changes (e.g. SecurityGroupId) from the CFN template.", + "GlobalClusterIdentifier": "The cluster identifier of the new global cluster.", "KmsKeyId": "The AWS KMS key identifier for an encrypted cluster.\n\nThe AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.\n\nIf an encryption key is not specified in `KmsKeyId` :\n\n- If the `StorageEncrypted` parameter is `true` , Amazon DocumentDB uses your default encryption key.\n\nAWS KMS creates the default encryption key for your AWS account . Your AWS account has a different default encryption key for each AWS Regions .", "ManageMasterUserPassword": "Specifies whether to manage the master user password with Amazon Web Services Secrets Manager.\n\nConstraint: You can't manage the master user password with Amazon Web Services Secrets Manager if `MasterUserPassword` is specified.", "MasterUserPassword": "The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).\n\nConstraints: Must contain from 8 to 100 characters.", "MasterUserSecretKmsKeyId": "The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager. This setting is valid only if the master user password is managed by Amazon DocumentDB in Amazon Web Services Secrets Manager for the DB cluster.\n\nThe Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.\n\nIf you don't specify `MasterUserSecretKmsKeyId` , then the `aws/secretsmanager` KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the `aws/secretsmanager` KMS key to encrypt the secret, and you must use a customer managed KMS key.\n\nThere is a default KMS key for your Amazon Web Services account. 
Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.", "MasterUsername": "The name of the master user for the cluster.\n\nConstraints:\n\n- Must be from 1 to 63 letters or numbers.\n- The first character must be a letter.\n- Cannot be a reserved word for the chosen database engine.", + "NetworkType": "", "Port": "Specifies the port that the database engine is listening on.", "PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled using the `BackupRetentionPeriod` parameter.\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region .\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region , occurring on a random day of the week.\n\nValid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun\n\nConstraints: Minimum 30-minute window.", @@ -14051,10 +14081,10 @@ "EndDate": "The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to `expired` when it reaches its end date and time.\n\nYou must provide an `EndDate` value if `EndDateType` is `limited` . Omit `EndDate` if `EndDateType` is `unlimited` .\n\nIf the `EndDateType` is `limited` , the Capacity Reservation is cancelled within an hour from the specified time. For example, if you specify 5/31/2019, 13:30:55, the Capacity Reservation is guaranteed to end between 13:30:55 and 14:30:55 on 5/31/2019.\n\nIf you are requesting a future-dated Capacity Reservation, you can't specify an end date and time that is within the commitment duration.", "EndDateType": "Indicates the way in which the Capacity Reservation ends. A Capacity Reservation can have one of the following end types:\n\n- `unlimited` - The Capacity Reservation remains active until you explicitly cancel it. Do not provide an `EndDate` if the `EndDateType` is `unlimited` .\n- `limited` - The Capacity Reservation expires automatically at a specified date and time. You must provide an `EndDate` value if the `EndDateType` value is `limited` .", "EphemeralStorage": "*Deprecated.*", - "InstanceCount": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 100 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 25 instances ( *25 * m5.xlarge = 100 vCPUs* ). \n\nValid range: 1 - 1000", + "InstanceCount": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 64 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 16 instances ( *16 * m5.xlarge = 64 vCPUs* ). \n\nValid range: 1 - 1000", "InstanceMatchCriteria": "Indicates the type of instance launches that the Capacity Reservation accepts. 
The options include:\n\n- `open` - The Capacity Reservation automatically matches all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes run in the Capacity Reservation automatically without specifying any additional parameters.\n- `targeted` - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity.\n\n> If you are requesting a future-dated Capacity Reservation, you must specify `targeted` . \n\nDefault: `open`", "InstancePlatform": "The type of operating system for which to reserve capacity.", - "InstanceType": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, and T instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", + "InstanceType": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, T, and G instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", "OutPostArn": "> Not supported for future-dated Capacity Reservations. \n\nThe Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.", "PlacementGroupArn": "> Not supported for future-dated Capacity Reservations. \n\nThe Amazon Resource Name (ARN) of the cluster placement group in which to create the Capacity Reservation. For more information, see [Capacity Reservations for cluster placement groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cr-cpg.html) in the *Amazon EC2 User Guide* .", "TagSpecifications": "The tags to apply to the Capacity Reservation during launch.", @@ -14709,7 +14739,7 @@ }, "AWS::EC2::InstanceConnectEndpoint": { "ClientToken": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.", - "PreserveClientIp": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\nDefault: `false`", + "PreserveClientIp": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\n> `PreserveClientIp` is only supported on IPv4 EC2 Instance Connect Endpoints. To use `PreserveClientIp` , the value for `IpAddressType` must be `ipv4` . \n\nDefault: `false`", "SecurityGroupIds": "One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint.", "SubnetId": "The ID of the subnet in which to create the EC2 Instance Connect Endpoint.", "Tags": "The tags to apply to the EC2 Instance Connect Endpoint during creation." @@ -16544,9 +16574,9 @@ "CapacityProviderStrategy": "The capacity provider strategy to use for the service.\n\nIf a `capacityProviderStrategy` is specified, the `launchType` parameter must be omitted. 
If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\nA capacity provider strategy can contain a maximum of 20 capacity providers.\n\n> To remove this property from your service resource, specify an empty `CapacityProviderStrategyItem` array.", "Cluster": "The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.", "DeploymentConfiguration": "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.", - "DeploymentController": "The deployment controller to use for the service. If no deployment controller is specified, the default value of `ECS` is used.", + "DeploymentController": "The deployment controller to use for the service.", "DesiredCount": "The number of instantiations of the specified task definition to place and keep running in your service.\n\nFor new services, if a desired count is not specified, a default value of `1` is used. When using the `DAEMON` scheduling strategy, the desired count is not required.\n\nFor existing services, if a desired count is not specified, it is omitted from the operation.", - "EnableECSManagedTags": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you need to set the `propagateTags` request parameter.", + "EnableECSManagedTags": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you must set the `propagateTags` request parameter.", "EnableExecuteCommand": "Determines whether the execute command functionality is turned on for the service. If `true` , the execute command functionality is turned on for all containers in tasks as part of the service.", "HealthCheckGracePeriodSeconds": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing, VPC Lattice, and container health checks after a task has first started. If you don't specify a health check grace period value, the default value of `0` is used. If you don't use any of the health checks, then `healthCheckGracePeriodSeconds` is unused.\n\nIf your service's tasks take a while to start and respond to health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", "LaunchType": "The launch type on which to run your service. For more information, see [Amazon ECS Launch Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", @@ -16566,6 +16596,12 @@ "VolumeConfigurations": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. 
Currently, the only supported volume type is an Amazon EBS volume.\n\n> To remove this property from your service resource, specify an empty `ServiceVolumeConfiguration` array.", "VpcLatticeConfigurations": "The VPC Lattice configuration for the service being created." }, + "AWS::ECS::Service AdvancedConfiguration": { + "AlternateTargetGroupArn": "The Amazon Resource Name (ARN) of the alternate target group for Amazon ECS blue/green deployments.", + "ProductionListenerRule": "The Amazon Resource Name (ARN) that identifies the production listener rule (in the case of an Application Load Balancer) or listener (in the case of a Network Load Balancer) for routing production traffic.", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that grants Amazon ECS permission to call the Elastic Load Balancing APIs for you.", + "TestListenerRule": "The Amazon Resource Name (ARN) that identifies the test listener rule (in the case of an Application Load Balancer) or listener (in the case of a Network Load Balancer) for routing test traffic." + }, "AWS::ECS::Service AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address.\n\nConsider the following when you set this value:\n\n- When you use `create-service` or `update-service` , the default is `DISABLED` .\n- When the service `deploymentController` is `ECS` , the value must be `DISABLED` .", "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified.\n\n> All specified security groups must be from the same VPC.", "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified.\n\n> All specified subnets must be from the same VPC." }, @@ -16587,12 +16623,20 @@ }, "AWS::ECS::Service DeploymentConfiguration": { "Alarms": "Information about the CloudWatch alarms.", + "BakeTimeInMinutes": "The duration when both blue and green service revisions are running simultaneously after the production traffic has shifted.\n\nThe following rules apply when you don't specify a value:\n\n- For rolling deployments, the value is set to 3 hours (180 minutes).\n- When you use an external deployment controller ( `EXTERNAL` ), or the CodeDeploy blue/green deployment controller ( `CODE_DEPLOY` ), the value is set to 3 hours (180 minutes).\n- For all other cases, the value is set to 36 hours (2160 minutes).", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. 
For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", + "LifecycleHooks": "An array of deployment lifecycle hook objects to run custom logic at specific stages of the deployment lifecycle.", "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nThe Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the service uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.", - "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nIf any tasks are unhealthy and if `maximumPercent` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one \u2014 using the `minimumHealthyPercent` as a constraint \u2014 to clear up capacity to launch replacement tasks. 
For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." + "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. 
For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nIf any tasks are unhealthy and if `maximumPercent` doesn't allow the Amazon ECS scheduler to start replacement tasks, the scheduler stops the unhealthy tasks one-by-one \u2014 using the `minimumHealthyPercent` as a constraint \u2014 to clear up capacity to launch replacement tasks. For more information about how the scheduler replaces unhealthy tasks, see [Amazon ECS services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) .\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "Strategy": "The deployment strategy for the service. 
Choose from these valid values:\n\n- `ROLLING` - When you create a service which uses the rolling update ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.\n- `BLUE_GREEN` - A blue/green deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed." }, "AWS::ECS::Service DeploymentController": { - "Type": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service." + "Type": "The deployment controller type to use.\n\nThe deployment controller is the mechanism that determines how tasks are deployed for your service. The valid options are:\n\n- ECS\n\nWhen you create a service which uses the `ECS` deployment controller, you can choose between the following deployment strategies:\n\n- `ROLLING` : When you create a service which uses the *rolling update* ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. 
The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.\n\nRolling update deployments are best suited for the following scenarios:\n\n- Gradual service updates: You need to update your service incrementally without taking the entire service offline at once.\n- Limited resource requirements: You want to avoid the additional resource costs of running two complete environments simultaneously (as required by blue/green deployments).\n- Acceptable deployment time: Your application can tolerate a longer deployment process, as rolling updates replace tasks one by one.\n- No need for instant roll back: Your service can tolerate a rollback process that takes minutes rather than seconds.\n- Simple deployment process: You prefer a straightforward deployment approach without the complexity of managing multiple environments, target groups, and listeners.\n- No load balancer requirement: Your service doesn't use or require a load balancer, Application Load Balancer , Network Load Balancer , or Service Connect (which are required for blue/green deployments).\n- Stateful applications: Your application maintains state that makes it difficult to run two parallel environments.\n- Cost sensitivity: You want to minimize deployment costs by not running duplicate environments during deployment.\n\nRolling updates are the default deployment strategy for services and provide a balance between deployment safety and resource efficiency for many common application scenarios.\n- `BLUE_GREEN` : A *blue/green* deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.\n\nAmazon ECS blue/green deployments are best suited for the following scenarios:\n\n- Service validation: When you need to validate new service revisions before directing production traffic to them\n- Zero downtime: When your service requires zero-downtime deployments\n- Instant roll back: When you need the ability to quickly roll back if issues are detected\n- Load balancer requirement: When your service uses Application Load Balancer , Network Load Balancer , or Service Connect\n- External\n\nUse a third-party deployment controller.\n- Blue/green deployment (powered by CodeDeploy )\n\nCodeDeploy installs an updated version of the application as a new replacement task set and reroutes production traffic from the original application task set to the replacement task set. The original task set is terminated after a successful deployment. Use this deployment controller to verify a new deployment of a service before sending production traffic to it.\n\nWhen updating the deployment controller for a service, consider the following depending on the type of migration you're performing.\n\n- If you have a template that contains the `EXTERNAL` deployment controller information as well as `TaskSet` and `PrimaryTaskSet` resources, and you remove the task set resources from the template when updating from `EXTERNAL` to `ECS` , the `DescribeTaskSet` and `DeleteTaskSet` API calls will return a 400 error after the deployment controller is updated to `ECS` . 
This results in a delete failure on the task set resources, even though the stack transitions to `UPDATE_COMPLETE` status. For more information, see [Resource removed from stack but not deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-resource-removed-not-deleted) in the AWS CloudFormation User Guide. To fix this issue, delete the task sets directly using the Amazon ECS `DeleteTaskSet` API. For more information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference.\n- If you're migrating from `CODE_DEPLOY` to `ECS` with a new task definition and AWS CloudFormation performs a rollback operation, the Amazon ECS `UpdateService` request fails with the following error:\n\nResource handler returned message: \"Invalid request provided: Unable to update task definition on services with a CODE_DEPLOY deployment controller.\n- After a successful migration from `ECS` to `EXTERNAL` deployment controller, you need to manually remove the `ACTIVE` task set, because Amazon ECS no longer manages the deployment. For information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference." + }, + "AWS::ECS::Service DeploymentLifecycleHook": { + "HookTargetArn": "The Amazon Resource Name (ARN) of the hook target. Currently, only Lambda function ARNs are supported.\n\nYou must provide this parameter when configuring a deployment lifecycle hook.", + "LifecycleStages": "The lifecycle stages at which to run the hook. Choose from these valid values:\n\n- RECONCILE_SERVICE\n\nThe reconciliation stage that only happens when you start a new service deployment with more than 1 service revision in an ACTIVE state.\n\nYou can use a lifecycle hook for this stage.\n- PRE_SCALE_UP\n\nThe green service revision has not started. The blue service revision is handling 100% of the production traffic. There is no test traffic.\n\nYou can use a lifecycle hook for this stage.\n- POST_SCALE_UP\n\nThe green service revision has started. The blue service revision is handling 100% of the production traffic. There is no test traffic.\n\nYou can use a lifecycle hook for this stage.\n- TEST_TRAFFIC_SHIFT\n\nThe blue and green service revisions are running. The blue service revision handles 100% of the production traffic. The green service revision is migrating from 0% to 100% of test traffic.\n\nYou can use a lifecycle hook for this stage.\n- POST_TEST_TRAFFIC_SHIFT\n\nThe test traffic shift is complete. The green service revision handles 100% of the test traffic.\n\nYou can use a lifecycle hook for this stage.\n- PRODUCTION_TRAFFIC_SHIFT\n\nProduction traffic is shifting to the green service revision. 
The green service revision is migrating from 0% to 100% of production traffic.\n\nYou can use a lifecycle hook for this stage.\n- POST_PRODUCTION_TRAFFIC_SHIFT\n\nThe production traffic shift is complete.\n\nYou can use a lifecycle hook for this stage.\n\nYou must provide this parameter when configuring a deployment lifecycle hook.", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that grants Amazon ECS permission to call Lambda functions on your behalf.\n\nFor more information, see [Permissions required for Lambda functions in Amazon ECS blue/green deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/blue-green-permissions.html) in the *Amazon Elastic Container Service Developer Guide* ." }, "AWS::ECS::Service EBSTagSpecification": { "PropagateTags": "Determines whether to propagate the tags from the task definition to the Amazon EBS volume. Tags can only propagate to a `SERVICE` specified in `ServiceVolumeConfiguration` . If no value is specified, the tags aren't propagated.", @@ -16600,6 +16644,7 @@ "Tags": "The tags applied to this Amazon EBS volume. `AmazonECSCreated` and `AmazonECSManaged` are reserved tags that can't be used." }, "AWS::ECS::Service LoadBalancer": { + "AdvancedConfiguration": "The advanced settings for the load balancer used in blue/green deployments. Specify the alternate target group, listener rules, and IAM role required for traffic shifting during blue/green deployments.", "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", "ContainerPort": "The port on the container to associate with the load balancer. This port must correspond to a `containerPort` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the `hostPort` of the port mapping.", "LoadBalancerName": "The name of the load balancer to associate with the Amazon ECS service or task set.\n\nIf you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.", @@ -16627,7 +16672,8 @@ }, "AWS::ECS::Service ServiceConnectClientAlias": { "DnsName": "The `dnsName` is the name that you use in the applications of client tasks to connect to this service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can include up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name can't start with a hyphen.\n\nIf this parameter isn't specified, the default value of `discoveryName.namespace` is used. If the `discoveryName` isn't specified, the port mapping name from the task definition is used in `portName.namespace` .\n\nTo avoid changing your applications in client Amazon ECS services, set this to the same name that the client application uses by default. For example, a few common names are `database` , `db` , or the lowercase name of a database, such as `mysql` or `redis` . For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", - "Port": "The listening port number for the Service Connect proxy. 
This port is available inside of all of the tasks within the same namespace.\n\nTo avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* ." + "Port": "The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace.\n\nTo avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "TestTrafficRules": "The configuration for test traffic routing rules used during blue/green deployments with Amazon ECS Service Connect. This allows you to route a portion of traffic to the new service revision of your service for testing before shifting all production traffic." }, "AWS::ECS::Service ServiceConnectConfiguration": { "Enabled": "Specifies whether to use Service Connect with this service.", @@ -16643,6 +16689,16 @@ "Timeout": "A reference to an object that represents the configured timeouts for Service Connect.", "Tls": "A reference to an object that represents a Transport Layer Security (TLS) configuration." }, + "AWS::ECS::Service ServiceConnectTestTrafficRules": { + "Header": "The HTTP header-based routing rules that determine which requests should be routed to the new service version during blue/green deployment testing. These rules provide fine-grained control over test traffic routing based on request headers." + }, + "AWS::ECS::Service ServiceConnectTestTrafficRulesHeader": { + "Name": "", + "Value": "" + }, + "AWS::ECS::Service ServiceConnectTestTrafficRulesHeaderValue": { + "Exact": "" + }, "AWS::ECS::Service ServiceConnectTlsCertificateAuthority": { "AwsPcaAuthorityArn": "The ARN of the AWS Private Certificate Authority certificate." }, @@ -18952,6 +19008,7 @@ "Description": "The event bus description.", "EventSourceName": "If you are creating a partner event bus, this specifies the partner event source that the new event bus will be matched with.", "KmsKeyIdentifier": "The identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.\n\nIf you do not specify a customer managed key identifier, EventBridge uses an AWS owned key to encrypt events on the event bus.\n\nFor more information, see [Identify and view keys](https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html) in the *AWS Key Management Service Developer Guide* .\n\n> Schema discovery is not supported for event buses encrypted using a customer managed key. 
EventBridge returns an error if:\n> \n> - You call `[CreateDiscoverer](https://docs.aws.amazon.com/eventbridge/latest/schema-reference/v1-discoverers.html#CreateDiscoverer)` on an event bus set to use a customer managed key for encryption.\n> - You call `[UpdateEventBus](https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_UpdateEventBus.html)` to set a customer managed key on an event bus with schema discovery enabled.\n> \n> To enable schema discovery on an event bus, choose to use an AWS owned key . For more information, see [Encrypting events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-encryption-event-bus-cmkey.html) in the *Amazon EventBridge User Guide* . > If you have specified that EventBridge use a customer managed key for encrypting the source event bus, we strongly recommend you also specify a customer managed key for any archives for the event bus as well.\n> \n> For more information, see [Encrypting archives](https://docs.aws.amazon.com/eventbridge/latest/userguide/encryption-archives.html) in the *Amazon EventBridge User Guide* .", + "LogConfig": "The logging configuration settings for the event bus.\n\nFor more information, see [Configuring logs for event buses](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html) in the *EventBridge User Guide* .", "Name": "The name of the new event bus.\n\nCustom event bus names can't contain the `/` character, but you can use the `/` character in partner event bus names. In addition, for partner event buses, the name must exactly match the name of the partner event source that this event bus is matched to.\n\nYou can't use the name `default` for a custom event bus, as this name is already used for your account's default event bus.", "Policy": "The permissions policy of the event bus, describing which other AWS accounts can write events to this event bus.", "Tags": "Tags to associate with the event bus." }, "AWS::Events::EventBus DeadLetterConfig": { "Arn": "The ARN of the SQS queue specified as the target for the dead-letter queue." }, + "AWS::Events::EventBus LogConfig": { + "IncludeDetail": "Whether EventBridge includes detailed event information in the records it generates. Detailed data can be useful for troubleshooting and debugging. This information includes details of the event itself, as well as target details.\n\nFor more information, see [Including detail data in event bus logs](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html#eb-event-logs-data) in the *EventBridge User Guide* .", + "Level": "The level of logging detail to include. This applies to all log destinations for the event bus.\n\nFor more information, see [Specifying event bus log level](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-bus-logs.html#eb-event-bus-logs-level) in the *EventBridge User Guide* ." + }, "AWS::Events::EventBus Tag": { "Key": "A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.", "Value": "The value for the specified tag key."
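To make the new AWS::ECS::Service deployment surface in the hunks above easier to scan, here is a minimal, hypothetical template fragment that combines the `ECS` deployment controller, the `BLUE_GREEN` strategy, and one deployment lifecycle hook as they are described in this patch. The cluster, task definition, function, and role ARNs are placeholders, and the property shapes are inferred from these property lists rather than taken from a verified template:

    {
      "Service": {
        "Type": "AWS::ECS::Service",
        "Properties": {
          "Cluster": "example-cluster",
          "TaskDefinition": "example-task:1",
          "DesiredCount": 2,
          "DeploymentController": { "Type": "ECS" },
          "DeploymentConfiguration": {
            "Strategy": "BLUE_GREEN",
            "MaximumPercent": 200,
            "MinimumHealthyPercent": 100,
            "LifecycleHooks": [
              {
                "HookTargetArn": "arn:aws:lambda:us-east-1:111122223333:function:validate-green",
                "RoleArn": "arn:aws:iam::111122223333:role/ecs-lifecycle-hook-role",
                "LifecycleStages": [ "POST_TEST_TRAFFIC_SHIFT" ]
              }
            ]
          }
        }
      }
    }

With `POST_TEST_TRAFFIC_SHIFT` as the only stage, the hypothetical `validate-green` function would run once the green service revision is handling 100% of the test traffic, which is the stage this patch documents for validating a revision before production traffic shifts.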
@@ -21780,6 +21841,7 @@ "AWS::GuardDuty::IPSet": { "Activate": "Indicates whether or not GuardDuty uses the `IPSet` .", "DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", + "ExpectedBucketOwner": "The AWS account ID that owns the Amazon S3 bucket specified in the `Location` parameter.", "Format": "The format of the file that contains the IPSet.", "Location": "The URI of the file that contains the IPSet.", "Name": "The user-friendly name to identify the IPSet.\n\nAllowed characters are alphanumeric, whitespace, dash (-), and underscores (_).", @@ -21846,6 +21908,7 @@ "AWS::GuardDuty::ThreatIntelSet": { "Activate": "A Boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet.", "DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create a `ThreatIntelSet` .\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", + "ExpectedBucketOwner": "The AWS account ID that owns the Amazon S3 bucket specified in the `Location` parameter.", "Format": "The format of the file that contains the ThreatIntelSet.", "Location": "The URI of the file that contains the ThreatIntelSet.", "Name": "A user-friendly ThreatIntelSet name displayed in all findings that are generated by activity that involves IP addresses included in this ThreatIntelSet.", @@ -24438,6 +24501,49 @@ "Unit": "A unit of time.", "Value": "A number of time units." }, + "AWS::IoTManagedIntegrations::CredentialLocker": { + "Name": "The name of the credential locker.", + "Tags": "A set of key/value pairs that are used to manage the credential locker." + }, + "AWS::IoTManagedIntegrations::ManagedThing": { + "AuthenticationMaterial": "The authentication material defining the device connectivity setup requests. The authentication materials used are the device bar code.", + "AuthenticationMaterialType": "The type of authentication material used for device connectivity setup requests.", + "Brand": "The brand of the device.", + "CapabilityReport": "A report of the capabilities for the managed thing.", + "Classification": "The classification of the managed thing such as light bulb or thermostat.", + "CredentialLockerId": "The identifier of the credential locker for the managed thing.", + "MetaData": "The metadata for the managed thing.\n\n> The `managedThing metadata` parameter is used for associating attributes with a `managedThing` that can be used for grouping over-the-air (OTA) tasks. Name value pairs in `metadata` can be used in the `OtaTargetQueryString` parameter for the `CreateOtaTask` API operation.", + "Model": "The model of the device.", + "Name": "The name of the managed thing representing the physical device.", + "Owner": "Owner of the device, usually an indication of whom the device belongs to. This value should not contain personally identifiable information.", + "Role": "The type of device used. This will be the hub controller, cloud device, or IoT device.", + "SerialNumber": "The serial number of the device.", + "Tags": "A set of key/value pairs that are used to manage the managed thing."
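As a quick illustration of the new `ExpectedBucketOwner` property documented above, a hypothetical AWS::GuardDuty::ThreatIntelSet resource might look like the following; the detector ID, bucket, and account ID are placeholders:

    {
      "ThreatIntelSet": {
        "Type": "AWS::GuardDuty::ThreatIntelSet",
        "Properties": {
          "Activate": true,
          "DetectorId": "12abc34d567e8fa901bc2d34e56789f0",
          "Format": "TXT",
          "Location": "https://s3.amazonaws.com/amzn-s3-demo-bucket/threat-list.txt",
          "ExpectedBucketOwner": "111122223333",
          "Name": "ExampleThreatIntelSet"
        }
      }
    }

Setting `ExpectedBucketOwner` to the account ID that owns `amzn-s3-demo-bucket` guards against the list being read from a bucket that has changed ownership.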
+ }, + "AWS::IoTManagedIntegrations::ManagedThing CapabilityReport": { + "Endpoints": "The endpoints used in the capability report.", + "NodeId": "The numeric identifier of the node.", + "Version": "The version of the capability report." + }, + "AWS::IoTManagedIntegrations::ManagedThing CapabilityReportCapability": { + "Actions": "The capability actions used in the capability report.", + "Events": "The capability events used in the capability report.", + "Id": "The id of the schema version.", + "Name": "The name of the capability.", + "Properties": "The capability properties used in the capability report.", + "Version": "The version of the capability." + }, + "AWS::IoTManagedIntegrations::ManagedThing CapabilityReportEndpoint": { + "Capabilities": "The capabilities used in the capability report.", + "DeviceTypes": "The type of device.", + "Id": "The id of the endpoint used in the capability report." + }, + "AWS::IoTManagedIntegrations::ProvisioningProfile": { + "CaCertificate": "The id of the certificate authority (CA) certificate.", + "Name": "The name of the provisioning template.", + "ProvisioningType": "The type of provisioning workflow the device uses for onboarding to IoT managed integrations.", + "Tags": "A set of key/value pairs that are used to manage the provisioning profile." + }, "AWS::IoTSiteWise::AccessPolicy": { "AccessPolicyIdentity": "The identity for this access policy. Choose an IAM Identity Center user, an IAM Identity Center group, or an IAM user.", "AccessPolicyPermission": "The permission level for this access policy. Note that a project `ADMINISTRATOR` is also known as a project owner.", @@ -24614,12 +24720,12 @@ "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", "GatewayName": "A unique name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", - "GatewayVersion": "The version of the gateway. A value of `3` indicates an MQTT-enabled, V3 gateway, while `2` indicates a Classic streams, V2 gateway.", + "GatewayVersion": "", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, "AWS::IoTSiteWise::Gateway GatewayCapabilitySummary": { "CapabilityConfiguration": "The JSON document that defines the configuration for the gateway capability. For more information, see [Configuring data sources (CLI)](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/configure-sources.html#configure-source-cli) in the *AWS IoT SiteWise User Guide* .", - "CapabilityNamespace": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` ." + "CapabilityNamespace": "The namespace of the capability configuration. For example, if you configure OPC UA sources for an MQTT-enabled gateway, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:3` ." 
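The removed `GatewayVersion` text above noted that `3` indicates an MQTT-enabled, V3 gateway, and the updated `CapabilityNamespace` example uses the matching `:3` version suffix. A hypothetical V3 gateway resource could therefore look like this sketch, with the gateway name, core device thing name, and empty source list as placeholders:

    {
      "SiteWiseGateway": {
        "Type": "AWS::IoTSiteWise::Gateway",
        "Properties": {
          "GatewayName": "example-opc-ua-gateway",
          "GatewayVersion": "3",
          "GatewayPlatform": {
            "GreengrassV2": { "CoreDeviceThingName": "ExampleCoreDevice" }
          },
          "GatewayCapabilitySummaries": [
            {
              "CapabilityNamespace": "iotsitewise:opcuacollector:3",
              "CapabilityConfiguration": "{\"sources\": []}"
            }
          ]
        }
      }
    }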
}, "AWS::IoTSiteWise::Gateway GatewayPlatform": { "GreengrassV2": "A gateway that runs on AWS IoT Greengrass V2 .", @@ -26156,7 +26262,8 @@ "SizeInMBs": "The size of the buffer, in MBs, that Kinesis Data Firehose uses for incoming data before delivering it to the destination. For valid values, see the `SizeInMBs` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* ." }, "AWS::KinesisFirehose::DeliveryStream CatalogConfiguration": { - "CatalogArn": "Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format `arn:aws:glue:region:account-id:catalog` ." + "CatalogArn": "Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format `arn:aws:glue:region:account-id:catalog` .", + "WarehouseLocation": "The warehouse location for Apache Iceberg tables. You must configure this when schema evolution and table creation is enabled.\n\nAmazon Data Firehose is in preview release and is subject to change." }, "AWS::KinesisFirehose::DeliveryStream CloudWatchLoggingOptions": { "Enabled": "Indicates whether CloudWatch Logs logging is enabled.", @@ -26218,6 +26325,7 @@ "AWS::KinesisFirehose::DeliveryStream DestinationTableConfiguration": { "DestinationDatabaseName": "The name of the Apache Iceberg database.", "DestinationTableName": "Specifies the name of the Apache Iceberg Table.", + "PartitionSpec": "The partition spec configuration for a table that is used by automatic table creation.\n\nAmazon Data Firehose is in preview release and is subject to change.", "S3ErrorOutputPrefix": "The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination.", "UniqueKeys": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table." }, @@ -26313,6 +26421,8 @@ "RetryOptions": "", "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.", "S3Configuration": "", + "SchemaEvolutionConfiguration": "The configuration to enable automatic schema evolution.\n\nAmazon Data Firehose is in preview release and is subject to change.", + "TableCreationConfiguration": "The configuration to enable automatic table creation.\n\nAmazon Data Firehose is in preview release and is subject to change.", "s3BackupMode": "Describes how Firehose will backup records. Currently,S3 backup only supports `FailedDataOnly` ." }, "AWS::KinesisFirehose::DeliveryStream InputFormatConfiguration": { @@ -26359,6 +26469,12 @@ "PageSizeBytes": "The Parquet page size. Column chunks are divided into pages. A page is conceptually an indivisible unit (in terms of compression and encoding). The minimum value is 64 KiB and the default is 1 MiB.", "WriterVersion": "Indicates the version of row format to output. The possible values are `V1` and `V2` . The default is `V1` ." }, + "AWS::KinesisFirehose::DeliveryStream PartitionField": { + "SourceName": "The column name to be configured in partition spec.\n\nAmazon Data Firehose is in preview release and is subject to change." 
+ }, + "AWS::KinesisFirehose::DeliveryStream PartitionSpec": { + "Identity": "List of identity [transforms](https://iceberg.apache.org/spec/#partition-transforms) that perform an identity transformation. The transform takes the source value, and does not modify it. Result type is the source type.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream ProcessingConfiguration": { "Enabled": "Indicates whether data processing is enabled (true) or disabled (false).", "Processors": "The data processors." @@ -26409,6 +26525,9 @@ "TableName": "Specifies the AWS Glue table that contains the column information that constitutes your data schema.\n\n> If the `SchemaConfiguration` request parameter is used as part of invoking the `CreateDeliveryStream` API, then the `TableName` property is required and its value must be specified.", "VersionId": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Firehose uses the most recent version. This means that any updates to the table are automatically picked up." }, + "AWS::KinesisFirehose::DeliveryStream SchemaEvolutionConfiguration": { + "Enabled": "Specify whether you want to enable schema evolution.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream SecretsManagerConfiguration": { "Enabled": "Specifies whether you want to use the secrets manager feature. When set as `True` the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to `False` Firehose falls back to the credentials in the destination configuration.", "RoleARN": "Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination specific role defined in the destination configuration. If you do not provide the role, then we use the destination specific role. This parameter is required for Splunk.", @@ -26474,6 +26593,9 @@ "AWS::KinesisFirehose::DeliveryStream SplunkRetryOptions": { "DurationInSeconds": "The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt." }, + "AWS::KinesisFirehose::DeliveryStream TableCreationConfiguration": { + "Enabled": "Specify whether you want to enable automatic table creation.\n\nAmazon Data Firehose is in preview release and is subject to change." + }, "AWS::KinesisFirehose::DeliveryStream Tag": { "Key": "A unique identifier for the tag. Maximum length: 128 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % @", "Value": "An optional string, which you can use to describe or define the tag. Maximum length: 256 characters. Valid characters: Unicode letters, digits, white space, _ .
/ = + - % @" @@ -28284,7 +28406,7 @@ "ParseKeyValue": "Use this parameter to include the [parseKeyValue](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation-Processors.html#CloudWatch-Logs-Transformation-parseKeyValue) processor in your transformer.", "ParsePostgres": "Use this parameter to include the [parsePostGres](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parsePostGres) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", "ParseRoute53": "Use this parameter to include the [parseRoute53](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation-Processors.html#CloudWatch-Logs-Transformation-parseRoute53) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", - "ParseToOCSF": "Use this processor to convert logs into Open Cybersecurity Schema Framework (OCSF) format", + "ParseToOCSF": "Use this parameter to convert logs into Open Cybersecurity Schema (OCSF) format.", "ParseVPC": "Use this parameter to include the [parseVPC](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation-Processors.html#CloudWatch-Logs-Transformation-parseVPC) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", "ParseWAF": "Use this parameter to include the [parseWAF](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-parseWAF) processor in your transformer.\n\nIf you use this processor, it must be the first processor in your transformer.", "RenameKeys": "Use this parameter to include the [renameKeys](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch-Logs-Transformation.html#CloudWatch-Logs-Transformation-renameKeys) processor in your transformer.", @@ -31029,7 +31151,7 @@ "InputSwitchConfiguration": "The configuration for input switching based on the media quality confidence score (MQCS) as provided from AWS Elemental MediaLive.", "InputType": "The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.\n\nThe allowed values are:\n\n- `HLS` - The HLS streaming specification (which defines M3U8 manifests and TS segments).\n- `CMAF` - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).", "OutputHeaderConfiguration": "The settings for what common media server data (CMSD) headers AWS Elemental MediaPackage includes in responses to the CDN.", - "Tags": "The tags associated with the channel." + "Tags": "" }, "AWS::MediaPackageV2::Channel IngestEndpoint": { "Id": "The identifier associated with the ingest endpoint of the channel.", @@ -31042,8 +31164,8 @@ "PublishMQCS": "When true, AWS Elemental MediaPackage includes the MQCS in responses to the CDN. This setting is valid only when `InputType` is `CMAF` ." }, "AWS::MediaPackageV2::Channel Tag": { - "Key": "", - "Value": "" + "Key": "The key in the key:value pair for the tag.", + "Value": "The value in the key:value pair for the tag." }, "AWS::MediaPackageV2::ChannelGroup": { "ChannelGroupName": "The name of the channel group.", @@ -31051,8 +31173,8 @@ "Tags": "The tags associated with the channel group." 
}, "AWS::MediaPackageV2::ChannelGroup Tag": { - "Key": "", - "Value": "" + "Key": "The key in the key:value pair for the tag.", + "Value": "The value in the key:value pair for the tag." }, "AWS::MediaPackageV2::ChannelPolicy": { "ChannelGroupName": "The name of the channel group associated with the channel policy.", @@ -31073,18 +31195,56 @@ "StartoverWindowSeconds": "The size of the window (in seconds) to specify a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window.", "Tags": "The tags associated with the origin endpoint." }, + "AWS::MediaPackageV2::OriginEndpoint DashBaseUrl": { + "DvbPriority": "For use with DVB-DASH profiles only. The priority of this location for servings segments. The lower the number, the higher the priority.", + "DvbWeight": "For use with DVB-DASH profiles only. The weighting for source locations that have the same priority.", + "ServiceLocation": "The name of the source location.", + "Url": "A source location for segments." + }, + "AWS::MediaPackageV2::OriginEndpoint DashDvbFontDownload": { + "FontFamily": "The `fontFamily` name for subtitles, as described in [EBU-TT-D Subtitling Distribution Format](https://docs.aws.amazon.com/https://tech.ebu.ch/publications/tech3380) .", + "MimeType": "The `mimeType` of the resource that's at the font download URL.\n\nFor information about font MIME types, see the [MPEG-DASH Profile for Transport of ISO BMFF Based DVB Services over IP Based Networks](https://docs.aws.amazon.com/https://dvb.org/wp-content/uploads/2021/06/A168r4_MPEG-DASH-Profile-for-Transport-of-ISO-BMFF-Based-DVB-Services_Draft-ts_103-285-v140_November_2021.pdf) document.", + "Url": "The URL for downloading fonts for subtitles." + }, + "AWS::MediaPackageV2::OriginEndpoint DashDvbMetricsReporting": { + "Probability": "The number of playback devices per 1000 that will send error reports to the reporting URL. This represents the probability that a playback device will be a reporting player for this session.", + "ReportingUrl": "The URL where playback devices send error reports." + }, + "AWS::MediaPackageV2::OriginEndpoint DashDvbSettings": { + "ErrorMetrics": "Playback device error reporting settings.", + "FontDownload": "Subtitle font settings." + }, "AWS::MediaPackageV2::OriginEndpoint DashManifestConfiguration": { - "DrmSignaling": "", - "FilterConfiguration": "", - "ManifestName": "", - "ManifestWindowSeconds": "", - "MinBufferTimeSeconds": "", - "MinUpdatePeriodSeconds": "", - "PeriodTriggers": "", - "ScteDash": "", - "SegmentTemplateFormat": "", - "SuggestedPresentationDelaySeconds": "", - "UtcTiming": "" + "BaseUrls": "The base URLs to use for retrieving segments.", + "Compactness": "The layout of the DASH manifest that MediaPackage produces. `STANDARD` indicates a default manifest, which is compacted. `NONE` indicates a full manifest.\n\nFor information about compactness, see [DASH manifest compactness](https://docs.aws.amazon.com/mediapackage/latest/userguide/compacted.html) in the *AWS Elemental MediaPackage v2 User Guide* .", + "DrmSignaling": "Determines how the DASH manifest signals the DRM content.", + "DvbSettings": "For endpoints that use the DVB-DASH profile only. 
The font download and error reporting information that you want MediaPackage to pass through to the manifest.", + "FilterConfiguration": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", + "ManifestName": "A short string that's appended to the endpoint URL. The child manifest name creates a unique path to this endpoint.", + "ManifestWindowSeconds": "The total duration (in seconds) of the manifest's content.", + "MinBufferTimeSeconds": "Minimum amount of content (in seconds) that a player must keep available in the buffer.", + "MinUpdatePeriodSeconds": "Minimum amount of time (in seconds) that the player should wait before requesting updates to the manifest.", + "PeriodTriggers": "A list of triggers that controls when AWS Elemental MediaPackage separates the MPEG-DASH manifest into multiple periods. Type `ADS` to indicate that AWS Elemental MediaPackage must create periods in the output manifest that correspond to SCTE-35 ad markers in the input source. Leave this value empty to indicate that the manifest is contained all in one period. For more information about periods in the DASH manifest, see [Multi-period DASH in AWS Elemental MediaPackage](https://docs.aws.amazon.com/mediapackage/latest/userguide/multi-period.html) .", + "Profiles": "The profile that the output is compliant with.", + "ProgramInformation": "Details about the content that you want MediaPackage to pass through in the manifest to the playback device.", + "ScteDash": "The SCTE configuration.", + "SegmentTemplateFormat": "Determines the type of variable used in the `media` URL of the `SegmentTemplate` tag in the manifest. Also specifies if segment timeline information is included in `SegmentTimeline` or `SegmentTemplate` .\n\nValue description:\n\n- `NUMBER_WITH_TIMELINE` - The `$Number$` variable is used in the `media` URL. The value of this variable is the sequential number of the segment. A full `SegmentTimeline` object is presented in each `SegmentTemplate` .", + "SubtitleConfiguration": "The configuration for DASH subtitles.", + "SuggestedPresentationDelaySeconds": "The amount of time (in seconds) that the player should be from the end of the manifest.", + "UtcTiming": "Determines the type of UTC timing included in the DASH Media Presentation Description (MPD)." + }, + "AWS::MediaPackageV2::OriginEndpoint DashProgramInformation": { + "Copyright": "A copyright statement about the content.", + "LanguageCode": "The language code for this manifest.", + "MoreInformationUrl": "An absolute URL that contains more information about this content.", + "Source": "Information about the content provider.", + "Title": "The title for the manifest." + }, + "AWS::MediaPackageV2::OriginEndpoint DashSubtitleConfiguration": { + "TtmlConfiguration": "Settings for TTML subtitles." + }, + "AWS::MediaPackageV2::OriginEndpoint DashTtmlConfiguration": { + "TtmlProfile": "The profile that MediaPackage uses when signaling subtitles in the manifest. `IMSC` is the default profile. `EBU-TT-D` produces subtitles that are compliant with the EBU-TT-D TTML profile. MediaPackage passes through subtitle styles to the manifest. For more information about EBU-TT-D subtitles, see [EBU-TT-D Subtitling Distribution Format](https://tech.ebu.ch/publications/tech3380) ."
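Since the DASH manifest properties above now carry real descriptions, a hypothetical `DashManifests` entry on an AWS::MediaPackageV2::OriginEndpoint that exercises the new DVB-DASH additions could look like the following sketch; the URLs, names, and enum spellings such as `DVB_DASH` are placeholders inferred from these descriptions, not verified values:

    {
      "DashManifests": [
        {
          "ManifestName": "dash",
          "ManifestWindowSeconds": 300,
          "MinBufferTimeSeconds": 30,
          "MinUpdatePeriodSeconds": 15,
          "SuggestedPresentationDelaySeconds": 6,
          "SegmentTemplateFormat": "NUMBER_WITH_TIMELINE",
          "Profiles": [ "DVB_DASH" ],
          "BaseUrls": [
            {
              "Url": "https://example-cdn.example.com/dash/",
              "ServiceLocation": "primary",
              "DvbPriority": 1,
              "DvbWeight": 100
            }
          ],
          "DvbSettings": {
            "ErrorMetrics": [
              {
                "ReportingUrl": "https://reports.example.com/dvb",
                "Probability": 500
              }
            ],
            "FontDownload": {
              "Url": "https://fonts.example.com/subtitles.woff",
              "FontFamily": "ExampleSans",
              "MimeType": "application/font-woff"
            }
          }
        }
      ]
    }

A `Probability` of 500 would direct roughly half of playback devices (500 per 1000) to send error reports, matching the per-1000 semantics described above.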
}, "AWS::MediaPackageV2::OriginEndpoint DashUtcTiming": { "TimingMode": "The UTC timing mode.", @@ -31116,25 +31276,25 @@ }, "AWS::MediaPackageV2::OriginEndpoint HlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the HLS manifest configuration.", - "FilterConfiguration": "", + "FilterConfiguration": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "ManifestName": "The name of the manifest associated with the HLS manifest configuration.", "ManifestWindowSeconds": "The duration of the manifest window, in seconds, for the HLS manifest configuration.", "ProgramDateTimeIntervalSeconds": "The `EXT-X-PROGRAM-DATE-TIME` interval, in seconds, associated with the HLS manifest configuration.", "ScteHls": "THE SCTE-35 HLS configuration associated with the HLS manifest configuration.", - "StartTag": "", + "StartTag": "To insert an EXT-X-START tag in your HLS playlist, specify a StartTag configuration object with a valid TimeOffset. When you do, you can also optionally specify whether to include a PRECISE value in the EXT-X-START tag.", "Url": "The URL of the HLS manifest configuration.", - "UrlEncodeChildManifest": "" + "UrlEncodeChildManifest": "When enabled, MediaPackage URL-encodes the query string for API requests for HLS child manifests to comply with AWS Signature Version 4 (SigV4) signature signing protocol. For more information, see [AWS Signature Version 4 for API requests](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv.html) in *AWS Identity and Access Management User Guide* ." }, "AWS::MediaPackageV2::OriginEndpoint LowLatencyHlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", - "FilterConfiguration": "", + "FilterConfiguration": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "ManifestName": "A short string that's appended to the endpoint URL. The manifest name creates a unique path to this endpoint. If you don't enter a value, MediaPackage uses the default manifest name, `index` . MediaPackage automatically inserts the format extension, such as `.m3u8` . You can't use the same manifest name if you use HLS manifest and low-latency HLS manifest. The `manifestName` on the `HLSManifest` object overrides the `manifestName` you provided on the `originEndpoint` object.", "ManifestWindowSeconds": "The total duration (in seconds) of the manifest's content.", "ProgramDateTimeIntervalSeconds": "Inserts `EXT-X-PROGRAM-DATE-TIME` tags in the output manifest at the interval that you specify. If you don't enter an interval, `EXT-X-PROGRAM-DATE-TIME` tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n\nIrrespective of this parameter, if any `ID3Timed` metadata is in the HLS input, MediaPackage passes through that metadata to the HLS output.", "ScteHls": "The SCTE-35 HLS configuration associated with the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", - "StartTag": "", + "StartTag": "To insert an EXT-X-START tag in your HLS playlist, specify a StartTag configuration object with a valid TimeOffset. 
When you do, you can also optionally specify whether to include a PRECISE value in the EXT-X-START tag.", "Url": "The URL of the low-latency HLS (LL-HLS) manifest configuration of the origin endpoint.", - "UrlEncodeChildManifest": "" + "UrlEncodeChildManifest": "When enabled, MediaPackage URL-encodes the query string for API requests for LL-HLS child manifests to comply with AWS Signature Version 4 (SigV4) signature signing protocol. For more information, see [AWS Signature Version 4 for API requests](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv.html) in *AWS Identity and Access Management User Guide* ." }, "AWS::MediaPackageV2::OriginEndpoint Scte": { "ScteFilter": "The filter associated with the SCTE-35 configuration." @@ -31166,8 +31326,8 @@ "TimeOffset": "Specify the value for TIME-OFFSET within your EXT-X-START tag. Enter a signed floating point value which, if positive, must be less than the configured manifest duration minus three times the configured segment target duration. If negative, the absolute value must be larger than three times the configured segment target duration, and the absolute value must be smaller than the configured manifest duration." }, "AWS::MediaPackageV2::OriginEndpoint Tag": { - "Key": "", - "Value": "" + "Key": "The key in the key:value pair for the tag.", + "Value": "The value in the key:value pair for the tag." }, "AWS::MediaPackageV2::OriginEndpointPolicy": { "ChannelGroupName": "The name of the channel group associated with the origin endpoint policy.", @@ -32417,7 +32577,7 @@ "Main": "The path of the main definition file for the workflow.", "Name": "The workflow's name.", "ParameterTemplate": "The workflow's parameter template.", - "StorageCapacity": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version.", + "StorageCapacity": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version. The `storageCapacity` can be overwritten at run time. The storage capacity is not required for runs with a `DYNAMIC` storage type.", "StorageType": "", "Tags": "Tags for the workflow." }, @@ -32953,7 +33113,7 @@ "PreferredMaintenanceWindow": "The start time for a one-hour period each week during which AWS OpsWorks CM performs maintenance on the instance. Valid values must be specified in the following format: `DDD:HH:MM` . `MM` must be specified as `00` . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See `TimeWindowDefinition` for more information.\n\n*Example:* `Mon:08:00` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)", "SecurityGroupIds": "A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by `SubnetIds` .\n\nIf you do not specify this parameter, AWS OpsWorks CM creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).", "ServerName": "The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 40 characters.", - "ServiceRoleArn": "The service role that the AWS OpsWorks CM service backend uses to work with your account. 
Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.", + "ServiceRoleArn": "The service role that the AWS OpsWorks CM service backend uses to work with your account.", "SubnetIds": "The IDs of subnets in which to launch the server EC2 instance.\n\nAmazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.\n\nEC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.\n\nFor more information about supported Amazon EC2 platforms, see [Supported Platforms](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) .", "Tags": "A map that contains tag keys and tag values to attach to an AWS OpsWorks for Chef Automate or OpsWorks for Puppet Enterprise server.\n\n- The key cannot be empty.\n- The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: `+ - = . _ : / @`\n- The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: `+ - = . _ : / @`\n- Leading and trailing spaces are trimmed from both the key and value.\n- A maximum of 50 user-applied tags is allowed for any AWS OpsWorks CM server." }, @@ -44340,7 +44500,7 @@ "PermissionArns": "Specifies the [Amazon Resource Names (ARNs)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the AWS RAM permission to associate with the resource share. If you do not specify an ARN for the permission, AWS RAM automatically attaches the default version of the permission for each resource type. You can associate only one permission with each resource type included in the resource share.", "Principals": "Specifies the principals to associate with the resource share. The possible values are:\n\n- An AWS account ID\n- An Amazon Resource Name (ARN) of an organization in AWS Organizations\n- An ARN of an organizational unit (OU) in AWS Organizations\n- An ARN of an IAM role\n- An ARN of an IAM user\n\n> Not all resource types can be shared with IAM roles and users. For more information, see the column *Can share with IAM roles and users* in the tables on [Shareable AWS resources](https://docs.aws.amazon.com/ram/latest/userguide/shareable.html) in the *AWS Resource Access Manager User Guide* .", "ResourceArns": "Specifies a list of one or more ARNs of the resources to associate with the resource share.", - "Sources": "", + "Sources": "Specifies from which source accounts the service principal has access to the resources in this resource share.", "Tags": "Specifies one or more tags to attach to the resource share itself. It doesn't attach the tags to the resources associated with the resource share." }, "AWS::RAM::ResourceShare Tag": { @@ -44372,7 +44532,7 @@ "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. 
For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "ClusterScalabilityType": "Specifies the scalability mode of the Aurora DB cluster. When set to `limitless` , the cluster operates as an Aurora Limitless Database, allowing you to create a DB shard group for horizontal scaling (sharding) capabilities. When set to `standard` (the default), the cluster uses normal DB instance creation.", + "ClusterScalabilityType": "Specifies the scalability mode of the Aurora DB cluster. When set to `limitless` , the cluster operates as an Aurora Limitless Database, allowing you to create a DB shard group for horizontal scaling (sharding) capabilities. When set to `standard` (the default), the cluster uses normal DB instance creation.\n\n*Important:* Automated backup retention isn't supported with Aurora Limitless Database clusters. If you set this property to `limitless` , you cannot set `DeleteAutomatedBackups` to `false` . To create a backup, use manual snapshots instead.", "CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterInstanceClass": "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6gd.xlarge` . Not all DB instance classes are available in all AWS Regions , or for all database engines.\n\nFor the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only", @@ -44382,6 +44542,7 @@ "DBSystemId": "Reserved for future use.", "DatabaseInsightsMode": "The mode of Database Insights to enable for the DB cluster.\n\nIf you set this value to `advanced` , you must also set the `PerformanceInsightsEnabled` parameter to `true` and the `PerformanceInsightsRetentionPeriod` parameter to 465.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "DatabaseName": "The name of your database. If you don't provide a name, then Amazon RDS won't create a database in this DB cluster. 
For naming constraints, see [Naming Constraints](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
+ "DeleteAutomatedBackups": "Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted, unless the AWS Backup policy specifies a point-in-time restore rule.",
"DeletionProtection": "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
"Domain": "Indicates the directory ID of the Active Directory to create the DB cluster.\n\nFor Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.\n\nFor more information, see [Kerberos authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only",
"DomainIAMRoleName": "Specifies the name of the IAM role to use when making API calls to the Directory Service.\n\nValid for: Aurora DB clusters only",
@@ -44417,7 +44578,7 @@
"ScalingConfiguration": "The scaling configuration of an Aurora Serverless v1 DB cluster.\n\nThis property is only supported for Aurora Serverless v1. For Aurora Serverless v2, use the `ServerlessV2ScalingConfiguration` property.\n\nValid for: Aurora Serverless v1 DB clusters only",
"ServerlessV2ScalingConfiguration": "The scaling configuration of an Aurora Serverless v2 DB cluster.\n\nThis property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the `ScalingConfiguration` property.\n\nValid for: Aurora Serverless v2 DB clusters only",
"SnapshotIdentifier": "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.\n\nAfter you restore a DB cluster with a `SnapshotIdentifier` property, you must specify the same `SnapshotIdentifier` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed. However, if you don't specify the `SnapshotIdentifier` property, an empty DB cluster is created, and the original DB cluster is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified `SnapshotIdentifier` property, and the original DB cluster is deleted.\n\nIf you specify the `SnapshotIdentifier` property to restore a DB cluster (as opposed to specifying it for DB cluster updates), then don't specify the following properties:\n\n- `GlobalClusterIdentifier`\n- `MasterUsername`\n- `MasterUserPassword`\n- `ReplicationSourceIdentifier`\n- `RestoreType`\n- `SourceDBClusterIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `UseLatestRestorableTime`\n\nConstraints:\n\n- Must match the identifier of an existing Snapshot.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
- "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
+ "SourceDBClusterIdentifier": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n- Cannot be specified if `SourceDbClusterResourceId` is specified. You must specify either `SourceDBClusterIdentifier` or `SourceDbClusterResourceId` , but not both.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
"SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only",
"StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\n> If you specify both the `StorageEncrypted` and `SnapshotIdentifier` properties without specifying the `KmsKeyId` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that you provide. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters",
"StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", @@ -44479,6 +44640,7 @@ "AutomaticBackupReplicationRetentionPeriod": "The retention period for automated backups in a different AWS Region. Use this parameter to set a unique retention period that only applies to cross-Region automated backups. To enable automated backups in a different Region, specify a positive value for the `AutomaticBackupReplicationRegion` parameter.\n\nIf not specified, this parameter defaults to the value of the `BackupRetentionPeriod` parameter. The maximum allowed value is 35.", "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\nFor Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nConstraints:\n\n- The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment.\n- The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\nExample: `us-east-1d`", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. 
The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", + "BackupTarget": "The location for storing automated backups and manual snapshots.\n\nValid Values:\n\n- `local` (Dedicated Local Zone)\n- `outposts` ( AWS Outposts)\n- `region` ( AWS Region )\n\nDefault: `region`\n\nFor more information, see [Working with Amazon RDS on AWS Outposts](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) in the *Amazon RDS User Guide* .", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", "CertificateRotationRestart": "Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate.\n\nBy default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.\n\n> Set this parameter only if you are *not* using SSL/TLS to connect to the DB instance. \n\nIf you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:\n\n- For more information about rotating your SSL/TLS certificate for RDS DB engines, see [Rotating Your SSL/TLS Certificate.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide.*\n- For more information about rotating your SSL/TLS certificate for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.", "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", @@ -45997,9 +46159,10 @@ "BucketName": "A name for the bucket. If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. The bucket name must contain only lowercase letters, numbers, periods (.), and dashes (-) and must follow [Amazon S3 bucket restrictions and limitations](https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) . For more information, see [Rules for naming Amazon S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "CorsConfiguration": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. 
For more information, see [Enabling Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the *Amazon S3 User Guide* .", "IntelligentTieringConfigurations": "Defines how Amazon S3 handles Intelligent-Tiering storage.", - "InventoryConfigurations": "Specifies the inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", + "InventoryConfigurations": "Specifies the S3 Inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", "LifecycleConfiguration": "Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For more information, see [Object Lifecycle Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in the *Amazon S3 User Guide* .", "LoggingConfiguration": "Settings that define where logs are stored.", + "MetadataConfiguration": "The S3 Metadata configuration for a general purpose bucket.", "MetadataTableConfiguration": "The metadata table configuration of an Amazon S3 general purpose bucket.", "MetricsConfigurations": "Specifies a metrics configuration for the CloudWatch request metrics (specified by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased. For more information, see [PutBucketMetricsConfiguration](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) .", "NotificationConfiguration": "Configuration that defines how Amazon S3 handles bucket notifications.", @@ -46085,6 +46248,18 @@ "Prefix": "Specifies the inventory filter prefix.", "ScheduleFrequency": "Specifies the schedule for generating inventory results." }, + "AWS::S3::Bucket InventoryTableConfiguration": { + "ConfigurationState": "The configuration state of the inventory table, indicating whether the inventory table is enabled or disabled.", + "EncryptionConfiguration": "The encryption configuration for the inventory table.", + "TableArn": "The Amazon Resource Name (ARN) for the inventory table.", + "TableName": "The name of the inventory table." + }, + "AWS::S3::Bucket JournalTableConfiguration": { + "EncryptionConfiguration": "The encryption configuration for the journal table.", + "RecordExpiration": "The journal table record expiration settings for the journal table.", + "TableArn": "The Amazon Resource Name (ARN) for the journal table.", + "TableName": "The name of the journal table." + }, "AWS::S3::Bucket LambdaConfiguration": { "Event": "The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information, see [Supported Event Types](https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the *Amazon S3 User Guide* .", "Filter": "The filtering rules that determine which objects invoke the AWS Lambda function. For example, you can create a filter so that only image files with a `.jpg` extension invoke the function when they are added to the Amazon S3 bucket.", @@ -46099,9 +46274,23 @@ "LogFilePrefix": "A prefix for all log object keys. 
If you store log files from multiple Amazon S3 buckets in a single bucket, you can use a prefix to distinguish which log files came from which bucket.", "TargetObjectKeyFormat": "Amazon S3 key format for log objects. Only one format, either PartitionedPrefix or SimplePrefix, is allowed." }, + "AWS::S3::Bucket MetadataConfiguration": { + "Destination": "The destination information for the S3 Metadata configuration.", + "InventoryTableConfiguration": "The inventory table configuration for a metadata configuration.", + "JournalTableConfiguration": "The journal table configuration for a metadata configuration." + }, + "AWS::S3::Bucket MetadataDestination": { + "TableBucketArn": "The Amazon Resource Name (ARN) of the table bucket where the metadata configuration is stored.", + "TableBucketType": "The type of the table bucket where the metadata configuration is stored. The `aws` value indicates an AWS managed table bucket, and the `customer` value indicates a customer-managed table bucket. V2 metadata configurations are stored in AWS managed table buckets, and V1 metadata configurations are stored in customer-managed table buckets.", + "TableNamespace": "The namespace in the table bucket where the metadata tables for a metadata configuration are stored." + }, "AWS::S3::Bucket MetadataTableConfiguration": { "S3TablesDestination": "The destination information for the metadata table configuration. The destination table bucket must be in the same Region and AWS account as the general purpose bucket. The specified metadata table name must be unique within the `aws_s3_metadata` namespace in the destination table bucket." }, + "AWS::S3::Bucket MetadataTableEncryptionConfiguration": { + "KmsKeyArn": "If server-side encryption with AWS Key Management Service ( AWS KMS ) keys (SSE-KMS) is specified, you must also specify the KMS key Amazon Resource Name (ARN). You must specify a customer-managed KMS key that's located in the same Region as the general purpose bucket that corresponds to the metadata table configuration.", + "SseAlgorithm": "The encryption type specified for a metadata table. To specify server-side encryption with AWS Key Management Service ( AWS KMS ) keys (SSE-KMS), use the `aws:kms` value. To specify server-side encryption with Amazon S3 managed keys (SSE-S3), use the `AES256` value." + }, "AWS::S3::Bucket Metrics": { "EventThreshold": "A container specifying the time threshold for emitting the `s3:Replication:OperationMissedThreshold` event.", "Status": "Specifies whether the replication metrics are enabled." @@ -46157,6 +46346,10 @@ "Filter": "The filtering rules that determine which objects trigger notifications. For example, you can create a filter so that Amazon S3 sends notifications only when image files with a `.jpg` extension are added to the bucket. For more information, see [Configuring event notifications using object key name filtering](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/notification-how-to-filtering.html) in the *Amazon S3 User Guide* .", "Queue": "The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type. FIFO queues are not allowed when enabling an SQS queue as the event notification destination." }, + "AWS::S3::Bucket RecordExpiration": { + "Days": "If you enable journal table record expiration, you can set the number of days to retain your journal table records. Journal table records must be retained for a minimum of 7 days. 
To set this value, specify any whole number from `7` to `2147483647` . For example, to retain your journal table records for one year, set this value to `365` .", + "Expiration": "Specifies whether journal table record expiration is enabled or disabled." + }, "AWS::S3::Bucket RedirectAllRequestsTo": { "HostName": "Name of the host where requests are redirected.", "Protocol": "Protocol to use when redirecting requests. The default is the protocol that is used in the original request." @@ -46488,7 +46681,8 @@ "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Zone (Availability Zone or Local Zone). The bucket name must also follow the format `*bucket_base_name* -- *zone_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "DataRedundancy": "The number of Zone (Availability Zone or Local Zone) that's used for redundancy for the bucket.", "LifecycleConfiguration": "Container for lifecycle rules. You can add as many as 1000 rules.\n\nFor more information see, [Creating and managing a lifecycle configuration for directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-lifecycle.html ) in the *Amazon S3 User Guide* .", - "LocationName": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is `usw2-az1` ." + "LocationName": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the Zone ID of the Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An example AZ ID value is `usw2-az1` .", + "Tags": "An array of tags that you can apply to the S3 directory bucket. Tags are key-value pairs of metadata used to categorize and organize your buckets, track costs, and control access. For more information, see [Using tags with directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html) ." }, "AWS::S3Express::DirectoryBucket AbortIncompleteMultipartUpload": { "DaysAfterInitiation": "Specifies the number of days after which Amazon S3 aborts an incomplete multipart upload." @@ -46516,6 +46710,10 @@ "BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. S3 Bucket Keys are always enabled for `GET` and `PUT` operations on a directory bucket and can\u2019t be disabled. 
It's only allowed to set the `BucketKeyEnabled` element to `true` .\n\nS3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) , [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) , [the Copy operation in Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops) , or [the import jobs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job) . In this case, Amazon S3 makes a call to AWS KMS every time a copy request is made for a KMS-encrypted object.\n\nFor more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-UsingKMSEncryption.html#s3-express-sse-kms-bucket-keys) in the *Amazon S3 User Guide* .", "ServerSideEncryptionByDefault": "Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied." }, + "AWS::S3Express::DirectoryBucket Tag": { + "Key": "The key of the tag. Tags are key-value pairs that you use to label your directory buckets. Tags can help you organize, track costs for, and control access to directory buckets. For more information, see [Using tags with directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html) .", + "Value": "The value of the tag. Tags are key-value pairs that you use to label your directory buckets. Tags can help you organize, track costs for, and control access to directory buckets. For more information, see [Using tags with directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-tagging.html) ." + }, "AWS::S3ObjectLambda::AccessPoint": { "Name": "The name of this access point.", "ObjectLambdaConfiguration": "A configuration used when creating an Object Lambda Access Point." @@ -46668,6 +46866,10 @@ "ResourcePolicy": "The bucket policy JSON for the table bucket.", "TableBucketARN": "The Amazon Resource Name (ARN) of the table bucket." }, + "AWS::S3Tables::TablePolicy": { + "ResourcePolicy": "", + "TableARN": "" + }, "AWS::SDB::Domain": { "Description": "Information about the SimpleDB domain." }, @@ -47254,7 +47456,7 @@ "CalendarNames": "The names or Amazon Resource Names (ARNs) of the Change Calendar type documents your associations are gated under. The associations only run when that Change Calendar is open. For more information, see [AWS Systems Manager Change Calendar](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar) in the *AWS Systems Manager User Guide* .", "ComplianceSeverity": "The severity level that is assigned to the association.", "DocumentVersion": "The version of the SSM document to associate with the target.\n\n> Note the following important information.\n> \n> - State Manager doesn't support running associations that use a new version of a document if that document is shared from another account. State Manager always runs the `default` version of a document if shared from another account, even though the Systems Manager console shows that a new version was processed. 
If you want to run an association using a new version of a document shared from another account, you must set the document version to `default` .\n> - `DocumentVersion` is not valid for documents owned by AWS , such as `AWS-RunPatchBaseline` or `AWS-UpdateSSMAgent` . If you specify `DocumentVersion` for an AWS document, the system returns the following error: \"Error occurred during operation 'CreateAssociation'.\" (RequestToken: , HandlerErrorCode: GeneralServiceException).",
- "InstanceId": "The ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.\n\n> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.",
+ "InstanceId": "> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.\n> \n> Note that in some examples later in this page, `InstanceIds` is used as the tag-key name in a `Targets` filter. `InstanceId` is not used as a parameter. \n\nThe ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.",
"MaxConcurrency": "The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.\n\nIf a new managed node starts and attempts to run an association while Systems Manager is running `MaxConcurrency` associations, the association is allowed to run. During the next association interval, the new managed node will process its association within the limit specified for `MaxConcurrency` .",
"MaxErrors": "The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set `MaxError` to 10%, then the system stops sending the request when the sixth error is received.\n\nExecutions that are already running an association when `MaxErrors` is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set `MaxConcurrency` to 1 so that executions proceed one at a time.",
"Name": "The name of the SSM document that contains the configuration information for the instance. You can specify `Command` or `Automation` documents. The documents can be AWS-predefined documents, documents you created, or a document that is shared with you from another account. 
For SSM documents that are shared with you from other AWS accounts , you must specify the complete SSM document ARN, in the following format:\n\n`arn:partition:ssm:region:account-id:document/document-name`\n\nFor example: `arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document`\n\nFor AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, `AWS-ApplyPatchBaseline` or `My-Document` .",
@@ -47335,7 +47537,7 @@
"AWS::SSM::MaintenanceWindowTask": {
"CutoffBehavior": "The specification for whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.",
"Description": "A description of the task.",
- "LoggingInfo": "Information about an Amazon S3 bucket to write Run Command task-level logs to.\n\n> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS ::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) .",
+ "LoggingInfo": "> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) . \n\nInformation about an Amazon S3 bucket to write Run Command task-level logs to.",
"MaxConcurrency": "The maximum number of targets this task can be run for, in parallel.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only when you are registering or updating a [targetless task](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html) You must provide a value in all other cases.\n> \n> For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of `1` . This value doesn't affect the running of your task.",
"MaxErrors": "The maximum number of errors allowed before this task stops being scheduled.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only when you are registering or updating a [targetless task](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html) You must provide a value in all other cases.\n> \n> For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of `1` . 
This value doesn't affect the running of your task.", "Name": "The task name.", @@ -47344,7 +47546,7 @@ "Targets": "The targets, either instances or window target IDs.\n\n- Specify instances using `Key=InstanceIds,Values= *instanceid1* , *instanceid2*` .\n- Specify window target IDs using `Key=WindowTargetIds,Values= *window-target-id-1* , *window-target-id-2*` .", "TaskArn": "The resource that the task uses during execution.\n\nFor `RUN_COMMAND` and `AUTOMATION` task types, `TaskArn` is the SSM document name or Amazon Resource Name (ARN).\n\nFor `LAMBDA` tasks, `TaskArn` is the function name or ARN.\n\nFor `STEP_FUNCTIONS` tasks, `TaskArn` is the state machine ARN.", "TaskInvocationParameters": "The parameters to pass to the task when it runs. Populate only the fields that match the task type. All other fields should be empty.\n\n> When you update a maintenance window task that has options specified in `TaskInvocationParameters` , you must provide again all the `TaskInvocationParameters` values that you want to retain. The values you do not specify again are removed. For example, suppose that when you registered a Run Command task, you specified `TaskInvocationParameters` values for `Comment` , `NotificationConfig` , and `OutputS3BucketName` . If you update the maintenance window task and specify only a different `OutputS3BucketName` value, the values for `Comment` and `NotificationConfig` are removed.", - "TaskParameters": "The parameters to pass to the task when it runs.\n\n> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) .", + "TaskParameters": "> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) . \n\nThe parameters to pass to the task when it runs.", "TaskType": "The type of task. Valid values: `RUN_COMMAND` , `AUTOMATION` , `LAMBDA` , `STEP_FUNCTIONS` .", "WindowId": "The ID of the maintenance window where the task is registered." }, @@ -47402,7 +47604,7 @@ "AllowedPattern": "A regular expression used to validate the parameter value. For example, for `String` types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "DataType": "The data type of the parameter, such as `text` or `aws:ec2:image` . The default is `text` .", "Description": "Information about the parameter.", - "Name": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. 
For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`",
+ "Name": "The name of the parameter.\n\n> The reported maximum length of 2048 characters for a parameter name includes 1037 characters that are reserved for internal use by Systems Manager . The maximum length for a parameter name that you specify is 1011 characters.\n> \n> This count of 1011 characters includes the characters in the ARN that precede the name you specify. This ARN length will vary depending on your partition and Region. For example, the following 45 characters count toward the 1011 character maximum for a parameter created in the US East (Ohio) Region: `arn:aws:ssm:us-east-2:111122223333:parameter/` .",
"Policies": "Information about the policies assigned to a parameter.\n\n[Assigning parameter policies](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) in the *AWS Systems Manager User Guide* .",
"Tags": "Optional metadata that you assign to a resource in the form of an arbitrary set of tags (key-value pairs). Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter.",
"Tier": "The parameter tier.",
@@ -47433,7 +47635,7 @@
"PatchFilters": "The set of patch filters that make up the group."
},
"AWS::SSM::PatchBaseline PatchSource": {
- "Configuration": "The value of the yum repo configuration. For example:\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\n> For information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) .",
+ "Configuration": "The value of the repo configuration.\n\n*Example for yum repositories*\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\nFor information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) on the *man7.org* website.\n\n*Examples for Ubuntu Server and Debian Server*\n\n`deb http://security.ubuntu.com/ubuntu jammy main`\n\n`deb https://site.example.com/debian distribution component1 component2 component3`\n\nRepo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see [jammy (5) sources.list.5.gz](https://docs.aws.amazon.com/https://manpages.ubuntu.com/manpages/jammy/man5/sources.list.5.html) on the *Ubuntu Server Manuals* website and [sources.list format](https://docs.aws.amazon.com/https://wiki.debian.org/SourcesList#sources.list_format) on the *Debian Wiki* .",
"Name": "The name specified to identify the patch source.",
"Products": "The specific operating system versions a patch repository applies to, such as \"Ubuntu16.04\", \"RedhatEnterpriseLinux7.2\" or \"Suse12.7\". For lists of supported product values, see [PatchFilter](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html) in the *AWS Systems Manager API Reference* .
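For reference, a minimal template sketch (not part of this patch) of how a `Configuration` value such as the yum example above is supplied through the `Sources` property of `AWS::SSM::PatchBaseline`; the baseline name and product list are illustrative assumptions:

    Resources:
      ExamplePatchBaseline:
        Type: AWS::SSM::PatchBaseline
        Properties:
          Name: example-custom-repo-baseline   # hypothetical baseline name
          OperatingSystem: AMAZON_LINUX_2
          Sources:
            - Name: MyCustomRepository         # mirrors the repo name in the yum example above
              Products:
                - AmazonLinux2
              Configuration: |                 # the multi-line repo configuration described above
                [main]
                name=MyCustomRepository
                baseurl=https://my-custom-repository
                enabled=1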
}, @@ -48153,6 +48355,7 @@ "EnvironmentId": "The ID of the environment that Amazon SageMaker Unified Studio associates with the domain.", "ProjectId": "The ID of the Amazon SageMaker Unified Studio project that corresponds to the domain.", "ProjectS3Path": "The location where Amazon S3 stores temporary execution data and other artifacts for the project that corresponds to the domain.", + "SingleSignOnApplicationArn": "The ARN of the application managed by SageMaker AI and SageMaker Unified Studio in the AWS IAM Identity Center.", "StudioWebPortalAccess": "Sets whether you can access the domain in Amazon SageMaker Studio:\n\n- **ENABLED** - You can access the domain in Amazon SageMaker Studio. If you migrate the domain to Amazon SageMaker Unified Studio, you can access it in both studio interfaces.\n- **DISABLED** - You can't access the domain in Amazon SageMaker Studio. If you migrate the domain to Amazon SageMaker Unified Studio, you can access it only in that studio interface.\n\nTo migrate a domain to Amazon SageMaker Unified Studio, you specify the UnifiedStudioSettings data type when you use the UpdateDomain action." }, "AWS::SageMaker::Domain UserSettings": { @@ -49480,7 +49683,18 @@ "ProjectName": "The name of the project.", "ServiceCatalogProvisionedProductDetails": "Details of a provisioned service catalog product. For information about service catalog, see [What is AWS Service Catalog](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html) .", "ServiceCatalogProvisioningDetails": "The product ID and provisioning artifact ID to provision a service catalog. For information, see [What is AWS Service Catalog](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html) .", - "Tags": "A list of key-value pairs to apply to this resource.\n\nFor more information, see [Resource Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) and [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) in the *AWS Billing and Cost Management User Guide* ." + "Tags": "A list of key-value pairs to apply to this resource.\n\nFor more information, see [Resource Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) and [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) in the *AWS Billing and Cost Management User Guide* .", + "TemplateProviderDetails": "An array of template providers associated with the project." + }, + "AWS::SageMaker::Project CfnStackParameter": { + "Key": "The name of the CloudFormation parameter.", + "Value": "The value of the CloudFormation parameter." + }, + "AWS::SageMaker::Project CfnTemplateProviderDetail": { + "Parameters": "An array of CloudFormation stack parameters.", + "RoleARN": "The IAM role used by CloudFormation to create the stack.", + "TemplateName": "The unique identifier of the template within the project.", + "TemplateURL": "The Amazon S3 URL of the CloudFormation template." }, "AWS::SageMaker::Project ProvisioningParameter": { "Key": "The key that identifies a provisioning parameter.", @@ -49500,6 +49714,9 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." 
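A minimal sketch (not part of this patch) of how the `TemplateProviderDetails` pieces documented above compose on an `AWS::SageMaker::Project`, inferred from the property names in this hunk; every value is a placeholder:

    Resources:
      ExampleProject:
        Type: AWS::SageMaker::Project
        Properties:
          ProjectName: example-project   # placeholder project name
          TemplateProviderDetails:
            - CfnTemplateProviderDetail:
                TemplateName: template-1   # unique identifier of the template within the project
                TemplateURL: https://amzn-s3-demo-bucket.s3.us-east-1.amazonaws.com/template.yaml   # placeholder S3 URL
                RoleARN: arn:aws:iam::111122223333:role/ExampleCfnRole   # placeholder role used by CloudFormation
                Parameters:
                  - Key: Environment   # CloudFormation stack parameter name
                    Value: dev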
},
+ "AWS::SageMaker::Project TemplateProviderDetail": {
+ "CfnTemplateProviderDetail": "Details about a CloudFormation template provider configuration and associated provisioning information."
+ },
"AWS::SageMaker::Space": {
"DomainId": "The ID of the associated domain.",
"OwnershipSettings": "The collection of ownership settings for a space.",
@@ -51149,6 +51366,7 @@
"SigningAlgorithm": "The algorithm that is used to sign the AS2 messages sent with the connector."
},
"AWS::Transfer::Connector SftpConfig": {
+ "MaxConcurrentConnections": "Specify the number of concurrent connections that your connector creates to the remote server. The default value is `1` . The maximum value is `5` .\n\n> If you are using the AWS Management Console , the default value is `5` . \n\nThis parameter specifies the number of active connections that your connector can establish with the remote server at the same time. Increasing this value can enhance connector performance when transferring large file batches by enabling parallel operations.",
"TrustedHostKeys": "The public portion of the host key, or keys, that are used to identify the external server to which you are connecting. You can use the `ssh-keyscan` command against the SFTP server to retrieve the necessary key.\n\n> `TrustedHostKeys` is optional for `CreateConnector` . If not provided, you can use `TestConnection` to retrieve the server host key during the initial connection attempt, and subsequently update the connector with the observed host key. \n\nThe three standard SSH public key format elements are `<key type>` , `<body base64>` , and an optional `<comment>` , with spaces between each element. Specify only the `<key type>` and `<body base64>` : do not enter the `<comment>` portion of the key.\n\nFor the trusted host key, AWS Transfer Family accepts RSA and ECDSA keys.\n\n- For RSA keys, the `<key type>` string is `ssh-rsa` .\n- For ECDSA keys, the `<key type>` string is either `ecdsa-sha2-nistp256` , `ecdsa-sha2-nistp384` , or `ecdsa-sha2-nistp521` , depending on the size of the key you generated.\n\nRun this command to retrieve the SFTP server host key, where your SFTP server name is `ftp.host.com` .\n\n`ssh-keyscan ftp.host.com`\n\nThis prints the public host key to standard output.\n\n`ftp.host.com ssh-rsa AAAAB3Nza...`\n\n> - Required when creating an SFTP connector\n> - Optional when updating an existing SFTP connector"
},
@@ -51173,12 +51391,13 @@
"EndpointType": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n> After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n> \n> For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n> \n> It is recommended that you use `VPC` as the `EndpointType` . 
With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` .",
"IdentityProviderDetails": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` .",
"IdentityProviderType": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type.",
+ "IpAddressType": "Specifies whether to use IPv4 only, or to use dual-stack (IPv4 and IPv6) for your AWS Transfer Family endpoint. The default value is `IPV4` .\n\n> The `IpAddressType` parameter has the following limitations:\n> \n> - It cannot be changed while the server is online. You must stop the server before modifying this parameter.\n> - It cannot be updated to `DUALSTACK` if the server has `AddressAllocationIds` specified. \n\n> When using `DUALSTACK` as the `IpAddressType` , you cannot set the `AddressAllocationIds` parameter for the [EndpointDetails](https://docs.aws.amazon.com/transfer/latest/APIReference/API_EndpointDetails.html) for the server.",
"LoggingRole": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in your CloudWatch logs.",
"PostAuthenticationLoginBanner": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n> The SFTP protocol does not support post-authentication display banners.",
"PreAuthenticationLoginBanner": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`",
"ProtocolDetails": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. 
Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`", "Protocols": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n> - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n> - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n> - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n> - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n> - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`", - "S3StorageOptions": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", + "S3StorageOptions": "Specifies whether or not performance for your Amazon S3 directories is optimized.\n\n- If using the console, this is enabled by default.\n- If using the API or CLI, this is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . 
If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", "SecurityPolicyName": "Specifies the name of the security policy for the server.", "StructuredLogDestinations": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`", "Tags": "Key-value pairs that can be used to group and search for servers.", @@ -51205,7 +51424,7 @@ "TlsSessionResumptionMode": "A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. `TlsSessionResumptionMode` determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during `CreateServer` and `UpdateServer` calls. If a `TlsSessionResumptionMode` value is not specified during `CreateServer` , it is set to `ENFORCED` by default.\n\n- `DISABLED` : the server does not process TLS session resumption client requests and creates a new TLS session for each request.\n- `ENABLED` : the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing.\n- `ENFORCED` : the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to `ENFORCED` , test your clients.\n\n> Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the `ENFORCED` value, you need to test your clients." }, "AWS::Transfer::Server S3StorageOptions": { - "DirectoryListingOptimization": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + "DirectoryListingOptimization": "Specifies whether or not performance for your Amazon S3 directories is optimized.\n\n- If using the console, this is enabled by default.\n- If using the API or CLI, this is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." 
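A minimal sketch (not part of this patch) of a Transfer Family server that opts in to the directory-listing optimization described above, assuming a simple service-managed SFTP setup:

    Resources:
      ExampleTransferServer:
        Type: AWS::Transfer::Server
        Properties:
          EndpointType: PUBLIC
          Protocols:
            - SFTP
          IdentityProviderType: SERVICE_MANAGED
          S3StorageOptions:
            DirectoryListingOptimization: ENABLED   # API/CLI default is DISABLED; the console enables it by default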
}, "AWS::Transfer::Server Tag": { "Key": "The name assigned to the tag that you create.", @@ -53018,6 +53237,34 @@ "MessageTemplateArn": "The Amazon Resource Name (ARN) of the message template.", "MessageTemplateContentSha256": "The content SHA256 of the message template." }, + "AWS::Wisdom::QuickResponse": { + "Channels": "The Amazon Connect contact channels this quick response applies to. The supported contact channel types include `Chat` .", + "Content": "The content of the quick response.", + "ContentType": "The media type of the quick response content.\n\n- Use `application/x.quickresponse;format=plain` for quick response written in plain text.\n- Use `application/x.quickresponse;format=markdown` for quick response written in richtext.", + "Description": "The description of the quick response.", + "GroupingConfiguration": "The configuration information of the user groups that the quick response is accessible to.", + "IsActive": "Whether the quick response is active.", + "KnowledgeBaseArn": "The Amazon Resource Name (ARN) of the knowledge base.", + "Language": "The language code value for the language in which the quick response is written. The supported language codes include `de_DE` , `en_US` , `es_ES` , `fr_FR` , `id_ID` , `it_IT` , `ja_JP` , `ko_KR` , `pt_BR` , `zh_CN` , `zh_TW`", + "Name": "The name of the quick response.", + "ShortcutKey": "The shortcut key of the quick response. The value should be unique across the knowledge base.", + "Tags": "The tags used to organize, track, or control access for this resource." + }, + "AWS::Wisdom::QuickResponse GroupingConfiguration": { + "Criteria": "The criteria used for grouping Amazon Q in Connect users.\n\nThe following is the list of supported criteria values.\n\n- `RoutingProfileArn` : Grouping the users by their [Amazon Connect routing profile ARN](https://docs.aws.amazon.com/connect/latest/APIReference/API_RoutingProfile.html) . User should have [SearchRoutingProfile](https://docs.aws.amazon.com/connect/latest/APIReference/API_SearchRoutingProfiles.html) and [DescribeRoutingProfile](https://docs.aws.amazon.com/connect/latest/APIReference/API_DescribeRoutingProfile.html) permissions when setting criteria to this value.", + "Values": "The list of values that define different groups of Amazon Q in Connect users.\n\n- When setting `criteria` to `RoutingProfileArn` , you need to provide a list of ARNs of [Amazon Connect routing profiles](https://docs.aws.amazon.com/connect/latest/APIReference/API_RoutingProfile.html) as values of this parameter." + }, + "AWS::Wisdom::QuickResponse QuickResponseContentProvider": { + "Content": "The content of the quick response." + }, + "AWS::Wisdom::QuickResponse QuickResponseContents": { + "Markdown": "The quick response content in markdown format.", + "PlainText": "The quick response content in plaintext format." + }, + "AWS::Wisdom::QuickResponse Tag": { + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" + }, "AWS::WorkSpaces::ConnectionAlias": { "ConnectionString": "The connection string specified for the connection alias. 
The connection string must be in the form of a fully qualified domain name (FQDN), such as `www.example.com` .", "Tags": "The tags to associate with the connection alias." diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 511bc24c0..b380afe86 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -18086,7 +18086,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", + "markdownDescription": "The instance type to use when launching fleet instances. The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "title": "InstanceType", "type": "string" }, @@ -18332,7 +18332,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type to use 
when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge", + "markdownDescription": "The instance type to use when launching the image builder. The following instance types are available:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n- stream.graphics.g5.xlarge\n- stream.graphics.g5.2xlarge\n- stream.graphics.g5.4xlarge\n- stream.graphics.g5.8xlarge\n- stream.graphics.g5.16xlarge\n- stream.graphics.g5.12xlarge\n- stream.graphics.g5.24xlarge\n- stream.graphics.g6.xlarge\n- stream.graphics.g6.2xlarge\n- stream.graphics.g6.4xlarge\n- stream.graphics.g6.8xlarge\n- stream.graphics.g6.16xlarge\n- stream.graphics.g6.12xlarge\n- stream.graphics.g6.24xlarge\n- stream.graphics.gr6.4xlarge\n- stream.graphics.gr6.8xlarge", "title": "InstanceType", "type": "string" }, @@ -32421,7 +32421,7 @@ "type": "array" }, "KeyAlgorithm": { - "markdownDescription": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. 
For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate.html#algorithms) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", + "markdownDescription": "Specifies the algorithm of the public and private key pair that your certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not supported by all network clients. Some AWS services may require RSA keys, or only support ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the AWS service where you plan to deploy your certificate. For more information about selecting an algorithm, see [Key algorithms](https://docs.aws.amazon.com/acm/latest/userguide/acm-certificate-characteristics.html#algorithms-term) .\n\n> Algorithms supported for an ACM certificate request include:\n> \n> - `RSA_2048`\n> - `EC_prime256v1`\n> - `EC_secp384r1`\n> \n> Other listed algorithms are for imported certificates only. > When you request a private PKI certificate signed by a CA from AWS Private CA, the specified signing algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. \n\nDefault: RSA_2048", "title": "KeyAlgorithm", "type": "string" }, @@ -35388,7 +35388,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. `UNION` is not supported for create operations when using StackSet as a resource.", + "markdownDescription": "Refines which accounts to deploy stacks to by specifying how to use the `Accounts` and `OrganizationalUnitIds` properties together.\n\nThe following values determine how CloudFormation selects target accounts:\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` property.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` property.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` property. `UNION` is not supported for create operations when using StackSet as a resource or the `CreateStackInstances` API.", "title": "AccountFilterType", "type": "string" }, @@ -35401,7 +35401,7 @@ "type": "array" }, "AccountsUrl": { - "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. 
There is currently a 10MB limit for the data (approximately 800,000 accounts).", + "markdownDescription": "The Amazon S3 URL path to a file that contains a list of AWS account IDs. The file format must be either `.csv` or `.txt` , and the data can be comma-separated or new-line-separated. There is currently a 10MB limit for the data (approximately 800,000 accounts).\n\nThis property serves the same purpose as `Accounts` but allows you to specify a large number of accounts.", "title": "AccountsUrl", "type": "string" }, @@ -35420,7 +35420,7 @@ "additionalProperties": false, "properties": { "Active": { - "markdownDescription": "When `true` , StackSets performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, StackSets starts queued operations in request order.\n\n> If there are already running or queued operations, StackSets queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your StackSet's execution configuration while there are running or queued operations for that StackSet. \n\nWhen `false` (default), StackSets performs one operation at a time in request order.", + "markdownDescription": "When `true` , CloudFormation performs non-conflicting operations concurrently and queues conflicting operations. After conflicting operations finish, CloudFormation starts queued operations in request order.\n\n> If there are already running or queued operations, CloudFormation queues all incoming operations even if they are non-conflicting.\n> \n> You can't modify your StackSet's execution configuration while there are running or queued operations for that StackSet. \n\nWhen `false` (default), StackSets performs one operation at a time in request order.", "title": "Active", "type": "boolean" } @@ -35491,7 +35491,7 @@ "properties": { "DeploymentTargets": { "$ref": "#/definitions/AWS::CloudFormation::StackSet.DeploymentTargets", - "markdownDescription": "The AWS `OrganizationalUnitIds` or `Accounts` for which to create stack instances in the specified Regions.", + "markdownDescription": "The AWS Organizations accounts or AWS accounts to deploy stacks to in the specified Regions.", "title": "DeploymentTargets" }, "ParameterOverrides": { @@ -62435,7 +62435,7 @@ "title": "OnPremConfig" }, "ServerHostname": { - "markdownDescription": "Specifies the DNS name or IP version 4 address of the NFS file server that your DataSync agent connects to.", + "markdownDescription": "Specifies the DNS name or IP address (IPv4 or IPv6) of the NFS file server that your DataSync agent connects to.", "title": "ServerHostname", "type": "string" }, @@ -62571,7 +62571,7 @@ "type": "string" }, "ServerHostname": { - "markdownDescription": "Specifies the domain name or IP version 4 (IPv4) address of the object storage server that your DataSync agent connects to.", + "markdownDescription": "Specifies the domain name or IP address (IPv4 or IPv6) of the object storage server that your DataSync agent connects to.", "title": "ServerHostname", "type": "string" }, @@ -62788,7 +62788,7 @@ "type": "string" }, "ServerHostname": { - "markdownDescription": "Specifies the domain name or IP address of the SMB file server that your DataSync agent connects to.\n\nRemember the following when configuring this parameter:\n\n- You can't specify an IP version 6 (IPv6) address.\n- If you're using Kerberos authentication, you must specify a domain name.", + "markdownDescription": "Specifies the domain name or IP address (IPv4 or IPv6) of the SMB file server 
that your DataSync agent connects to.\n\n> If you're using Kerberos authentication, you must specify a domain name.", "title": "ServerHostname", "type": "string" }, @@ -68339,7 +68339,7 @@ "type": "boolean" }, "InstanceCount": { - "markdownDescription": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 100 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 25 instances ( *25 * m5.xlarge = 100 vCPUs* ). \n\nValid range: 1 - 1000", + "markdownDescription": "The number of instances for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for an instance count with a minimum of 64 vCPUs. For example, if you request a future-dated Capacity Reservation for `m5.xlarge` instances, you must request at least 16 instances ( *16 * m5.xlarge = 64 vCPUs* ). \n\nValid range: 1 - 1000", "title": "InstanceCount", "type": "number" }, @@ -68354,7 +68354,7 @@ "type": "string" }, "InstanceType": { - "markdownDescription": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, and T instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", + "markdownDescription": "The instance type for which to reserve capacity.\n\n> You can request future-dated Capacity Reservations for instance types in the C, M, R, I, T, and G instance families only. \n\nFor more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .", "title": "InstanceType", "type": "string" }, @@ -72242,7 +72242,7 @@ "type": "string" }, "PreserveClientIp": { - "markdownDescription": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\nDefault: `false`", + "markdownDescription": "Indicates whether the client IP address is preserved as the source. The following are the possible values.\n\n- `true` - Use the client IP address as the source.\n- `false` - Use the network interface IP address as the source.\n\n> `PreserveClientIp` is only supported on IPv4 EC2 Instance Connect Endpoints. To use `PreserveClientIp` , the value for `IpAddressType` must be `ipv4` . \n\nDefault: `false`", "title": "PreserveClientIp", "type": "boolean" }, @@ -83451,7 +83451,7 @@ }, "DeploymentController": { "$ref": "#/definitions/AWS::ECS::Service.DeploymentController", - "markdownDescription": "The deployment controller to use for the service. If no deployment controller is specified, the default value of `ECS` is used.", + "markdownDescription": "The deployment controller to use for the service.", "title": "DeploymentController" }, "DesiredCount": { "type": "number" }, "EnableECSManagedTags": { - "markdownDescription": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service.
For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you need to set the `propagateTags` request parameter.", + "markdownDescription": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nWhen you use Amazon ECS managed tags, you must set the `propagateTags` request parameter.", "title": "EnableECSManagedTags", "type": "boolean" }, @@ -83717,7 +83717,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.", + "markdownDescription": "The deployment controller type to use.\n\nThe deployment controller is the mechanism that determines how tasks are deployed for your service. The valid options are:\n\n- ECS\n\nWhen you create a service which uses the `ECS` deployment controller, you can choose between the following deployment strategies:\n\n- `ROLLING` : When you create a service which uses the *rolling update* ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. 
The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.\n\nRolling update deployments are best suited for the following scenarios:\n\n- Gradual service updates: You need to update your service incrementally without taking the entire service offline at once.\n- Limited resource requirements: You want to avoid the additional resource costs of running two complete environments simultaneously (as required by blue/green deployments).\n- Acceptable deployment time: Your application can tolerate a longer deployment process, as rolling updates replace tasks one by one.\n- No need for instant roll back: Your service can tolerate a rollback process that takes minutes rather than seconds.\n- Simple deployment process: You prefer a straightforward deployment approach without the complexity of managing multiple environments, target groups, and listeners.\n- No load balancer requirement: Your service doesn't use or require a load balancer, Application Load Balancer , Network Load Balancer , or Service Connect (which are required for blue/green deployments).\n- Stateful applications: Your application maintains state that makes it difficult to run two parallel environments.\n- Cost sensitivity: You want to minimize deployment costs by not running duplicate environments during deployment.\n\nRolling updates are the default deployment strategy for services and provide a balance between deployment safety and resource efficiency for many common application scenarios.\n- `BLUE_GREEN` : A *blue/green* deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.\n\nAmazon ECS blue/green deployments are best suited for the following scenarios:\n\n- Service validation: When you need to validate new service revisions before directing production traffic to them\n- Zero downtime: When your service requires zero-downtime deployments\n- Instant roll back: When you need the ability to quickly roll back if issues are detected\n- Load balancer requirement: When your service uses Application Load Balancer , Network Load Balancer , or Service Connect\n- External\n\nUse a third-party deployment controller.\n- Blue/green deployment (powered by CodeDeploy )\n\nCodeDeploy installs an updated version of the application as a new replacement task set and reroutes production traffic from the original application task set to the replacement task set. The original task set is terminated after a successful deployment. Use this deployment controller to verify a new deployment of a service before sending production traffic to it.\n\nWhen updating the deployment controller for a service, consider the following depending on the type of migration you're performing.\n\n- If you have a template that contains the `EXTERNAL` deployment controller information as well as `TaskSet` and `PrimaryTaskSet` resources, and you remove the task set resources from the template when updating from `EXTERNAL` to `ECS` , the `DescribeTaskSet` and `DeleteTaskSet` API calls will return a 400 error after the deployment controller is updated to `ECS` . 
This results in a delete failure on the task set resources, even though the stack transitions to `UPDATE_COMPLETE` status. For more information, see [Resource removed from stack but not deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-resource-removed-not-deleted) in the AWS CloudFormation User Guide. To fix this issue, delete the task sets directly using the Amazon ECS `DeleteTaskSet` API. For more information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference.\n- If you're migrating from `CODE_DEPLOY` to `ECS` with a new task definition and AWS CloudFormation performs a rollback operation, the Amazon ECS `UpdateService` request fails with the following error:\n\nResource handler returned message: \"Invalid request provided: Unable to update task definition on services with a CODE_DEPLOY deployment controller.\n- After a successful migration from `ECS` to `EXTERNAL` deployment controller, you need to manually remove the `ACTIVE` task set, because Amazon ECS no longer manages the deployment. For information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the Amazon Elastic Container Service API Reference.", "title": "Type", "type": "string" } @@ -130071,7 +130071,7 @@ "type": "string" }, "CapabilityNamespace": { - "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC-UA sources from the AWS IoT SiteWise console, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:version` , where `version` is a number such as `1` .", + "markdownDescription": "The namespace of the capability configuration. For example, if you configure OPC UA sources for an MQTT-enabled gateway, your OPC-UA capability configuration has the namespace `iotsitewise:opcuacollector:3` .", "title": "CapabilityNamespace", "type": "string" } @@ -163306,7 +163306,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags associated with the channel.", + "markdownDescription": "", "title": "Tags", "type": "array" } @@ -163738,7 +163738,7 @@ }, "FilterConfiguration": { "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.FilterConfiguration", - "markdownDescription": "", + "markdownDescription": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "title": "FilterConfiguration" }, "ManifestName": { @@ -163782,7 +163782,7 @@ }, "FilterConfiguration": { "$ref": "#/definitions/AWS::MediaPackageV2::OriginEndpoint.FilterConfiguration", - "markdownDescription": "", + "markdownDescription": "Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.", "title": "FilterConfiguration" }, "ManifestName": { @@ -171383,7 +171383,7 @@ "type": "object" }, "StorageCapacity": { - "markdownDescription": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version.", + "markdownDescription": "The default static storage capacity (in gibibytes) for runs that use this workflow or workflow version. The `storageCapacity` can be overwritten at run time. 
The storage capacity is not required for runs with a `DYNAMIC` storage type.", "title": "StorageCapacity", "type": "number" }, @@ -174213,7 +174213,7 @@ "type": "array" }, "ServiceRoleArn": { - "markdownDescription": "The service role that the AWS OpsWorks CM service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.", + "markdownDescription": "The service role that the AWS OpsWorks CM service backend uses to work with your account.", "title": "ServiceRoleArn", "type": "string" }, @@ -224007,7 +224007,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Specifies from which source accounts the service principal has access to the resources in this resource share.", "title": "Sources", "type": "array" }, @@ -224452,7 +224452,7 @@ "type": "string" }, "SourceDBClusterIdentifier": { - "markdownDescription": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing DBCluster.\n- Cannot be specified if `SourceDbClusterResourceId` is specified. You must specify either `SourceDBClusterIdentifier` or `SourceDbClusterResourceId` , but not both.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "title": "SourceDBClusterIdentifier", "type": "string" }, @@ -235200,7 +235200,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.InventoryConfiguration" }, - "markdownDescription": "Specifies the inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", + "markdownDescription": "Specifies the S3 Inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference* .", "title": "InventoryConfigurations", "type": "array" }, @@ -240838,7 +240838,7 @@ "type": "string" }, "InstanceId": { - "markdownDescription": "The ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.\n\n> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.", + "markdownDescription": "> `InstanceId` has been deprecated. To specify an instance ID for an association, use the `Targets` parameter. 
If you use the parameter `InstanceId` , you cannot use the parameters `AssociationName` , `DocumentVersion` , `MaxErrors` , `MaxConcurrency` , `OutputLocation` , or `ScheduleExpression` . To use these parameters, you must use the `Targets` parameter.\n> \n> Note that in some examples later in this page, `InstanceIds` is used as the tag-key name in a `Targets` filter. `InstanceId` is not used as a parameter. \n\nThe ID of the instance that the SSM document is associated with. You must specify the `InstanceId` or `Targets` property.", "title": "InstanceId", "type": "string" }, @@ -241429,7 +241429,7 @@ }, "LoggingInfo": { "$ref": "#/definitions/AWS::SSM::MaintenanceWindowTask.LoggingInfo", - "markdownDescription": "Information about an Amazon S3 bucket to write Run Command task-level logs to.\n\n> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS ::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) .", + "markdownDescription": "> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs for Run Command tasks, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS ::SSM::MaintenanceWindowTask MaintenanceWindowRunCommandParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-maintenancewindowruncommandparameters.html) . \n\nInformation about an Amazon S3 bucket to write Run Command task-level logs to.", "title": "LoggingInfo" }, "MaxConcurrency": { @@ -241476,7 +241476,7 @@ "title": "TaskInvocationParameters" }, "TaskParameters": { - "markdownDescription": "The parameters to pass to the task when it runs.\n\n> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) .", + "markdownDescription": "> `TaskParameters` has been deprecated. To specify parameters to pass to a task when it runs, instead use the `Parameters` option in the `TaskInvocationParameters` structure. For information about how Systems Manager handles these options for the supported maintenance window task types, see [MaintenanceWindowTaskInvocationParameters](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_MaintenanceWindowTaskInvocationParameters.html) . \n\nThe parameters to pass to the task when it runs.", "title": "TaskParameters", "type": "object" }, @@ -241802,7 +241802,7 @@ "type": "string" }, "Name": { - "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. 
The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", + "markdownDescription": "The name of the parameter.\n\n> The reported maximum length of 2048 characters for a parameter name includes 1037 characters that are reserved for internal use by Systems Manager . The maximum length for a parameter name that you specify is 1011 characters.\n> \n> This count of 1011 characters includes the characters in the ARN that precede the name you specify. This ARN length will vary depending on your partition and Region. For example, the following 45 characters count toward the 1011 character maximum for a parameter created in the US East (Ohio) Region: `arn:aws:ssm:us-east-2:111122223333:parameter/` .", "title": "Name", "type": "string" }, @@ -242049,7 +242049,7 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "The value of the yum repo configuration. For example:\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\n> For information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) .", + "markdownDescription": "The value of the repo configuration.\n\n*Example for yum repositories*\n\n`[main]`\n\n`name=MyCustomRepository`\n\n`baseurl=https://my-custom-repository`\n\n`enabled=1`\n\nFor information about other options available for your yum repository configuration, see [dnf.conf(5)](https://docs.aws.amazon.com/https://man7.org/linux/man-pages/man5/dnf.conf.5.html) on the *man7.org* website.\n\n*Examples for Ubuntu Server and Debian Server*\n\n`deb http://security.ubuntu.com/ubuntu jammy main`\n\n`deb https://site.example.com/debian distribution component1 component2 component3`\n\nRepo information for Ubuntu Server repositories must be specified in a single line. For more examples and information, see [jammy (5) sources.list.5.gz](https://docs.aws.amazon.com/https://manpages.ubuntu.com/manpages/jammy/man5/sources.list.5.html) on the *Ubuntu Server Manuals* website and [sources.list format](https://docs.aws.amazon.com/https://wiki.debian.org/SourcesList#sources.list_format) on the *Debian Wiki* .", "title": "Configuration", "type": "string" }, @@ -262983,7 +262983,7 @@ }, "S3StorageOptions": { "$ref": "#/definitions/AWS::Transfer::Server.S3StorageOptions", - "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", + "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized.\n\n- If using the console, this is enabled by default.\n- If using the API or CLI, this is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` .
If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", "title": "S3StorageOptions" }, "SecurityPolicyName": { @@ -263149,7 +263149,7 @@ "additionalProperties": false, "properties": { "DirectoryListingOptimization": { - "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", + "markdownDescription": "Specifies whether or not performance for your Amazon S3 directories is optimized.\n\n- If using the console, this is enabled by default.\n- If using the API or CLI, this is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", "title": "DirectoryListingOptimization", "type": "string" }
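
As a concrete illustration of the `S3StorageOptions` descriptions updated above, the fragment below sketches how the documented interaction plays out in a template: once `DirectoryListingOptimization` is `ENABLED`, a logical home directory mapping that should resolve to an individual object has to set the `HomeDirectoryMapEntry` `Type` to `FILE` explicitly. This is a minimal, hypothetical sketch, not part of the schema update itself; the logical IDs, user name, role ARN, account ID, and bucket path are illustrative placeholders, and since JSON templates cannot carry comments, the intent is noted here instead.

{
  "Resources": {
    "SftpServer": {
      "Type": "AWS::Transfer::Server",
      "Properties": {
        "Domain": "S3",
        "S3StorageOptions": {
          "DirectoryListingOptimization": "ENABLED"
        }
      }
    },
    "SftpUser": {
      "Type": "AWS::Transfer::User",
      "Properties": {
        "ServerId": { "Fn::GetAtt": ["SftpServer", "ServerId"] },
        "UserName": "example-user",
        "Role": "arn:aws:iam::111122223333:role/ExampleTransferAccessRole",
        "HomeDirectoryType": "LOGICAL",
        "HomeDirectoryMappings": [
          {
            "Entry": "/report.csv",
            "Target": "/amzn-s3-demo-bucket/reports/report.csv",
            "Type": "FILE"
          }
        ]
      }
    }
  }
}

Without the explicit "Type": "FILE", such a mapping keeps the default `DIRECTORY` type, which is exactly the behavior change the updated `DirectoryListingOptimization` text warns about.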